sched,rt: Use cpumask_any*_distribute()
Replace a bunch of cpumask_any*() instances with cpumask_any*_distribute(). By injecting this little bit of randomness into the CPU selection, we reduce the chance that two competing balance operations working off the same lowest_mask pick the same CPU.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Reviewed-by: Daniel Bristot de Oliveira <bristot@redhat.com>
Link: https://lkml.kernel.org/r/20201023102347.190759694@infradead.org
commit 14e292f8d4
parent 3015ef4b98
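The scheme is easy to demonstrate outside the kernel. Below is a minimal user-space C sketch of the distribution idea, assuming a 64-bit word stands in for struct cpumask and a single static variable stands in for the per-CPU distribute_cpu_mask_prev cursor; mask_next(), mask_first() and any_distribute() are hypothetical stand-ins for the kernel helpers, not real API.

#include <stdio.h>
#include <stdint.h>

#define NR_CPUS 64

/* Return the first set bit after 'prev', or NR_CPUS if none. */
static int mask_next(int prev, uint64_t mask)
{
	for (int cpu = prev + 1; cpu < NR_CPUS; cpu++)
		if (mask & (1ULL << cpu))
			return cpu;
	return NR_CPUS;
}

static int mask_first(uint64_t mask)
{
	return mask_next(-1, mask);
}

/*
 * Analogue of cpumask_any_distribute(): resume the search after the
 * CPU handed out last time and wrap around, so successive callers
 * spread across the mask instead of all picking the first set bit.
 */
static int any_distribute(uint64_t mask)
{
	static int prev = -1;	/* the kernel keeps this per-CPU */
	int next = mask_next(prev, mask);

	if (next >= NR_CPUS)
		next = mask_first(mask);
	if (next < NR_CPUS)
		prev = next;
	return next;
}

int main(void)
{
	uint64_t lowest_mask = (1ULL << 2) | (1ULL << 5) | (1ULL << 9);

	/* Prints "2 5 9 2"; cpumask_first() would give "2 2 2 2". */
	for (int i = 0; i < 4; i++)
		printf("%d ", any_distribute(lowest_mask));
	printf("\n");
	return 0;
}

In the kernel the cursor is a per-CPU variable, so concurrent balance operations running on different CPUs advance independent cursors, which is what makes two racing operations working off the same lowest_mask unlikely to pick the same CPU.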
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
@@ -199,6 +199,11 @@ static inline int cpumask_any_and_distribute(const struct cpumask *src1p,
 	return cpumask_next_and(-1, src1p, src2p);
 }
 
+static inline int cpumask_any_distribute(const struct cpumask *srcp)
+{
+	return cpumask_first(srcp);
+}
+
 #define for_each_cpu(cpu, mask)			\
 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
 #define for_each_cpu_not(cpu, mask)		\
@@ -252,6 +257,7 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
 unsigned int cpumask_local_spread(unsigned int i, int node);
 int cpumask_any_and_distribute(const struct cpumask *src1p,
 			       const struct cpumask *src2p);
+int cpumask_any_distribute(const struct cpumask *srcp);
 
 /**
  * for_each_cpu - iterate over every cpu in a mask
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
@@ -2002,8 +2002,8 @@ static int find_later_rq(struct task_struct *task)
 				return this_cpu;
 			}
 
-			best_cpu = cpumask_first_and(later_mask,
-							sched_domain_span(sd));
+			best_cpu = cpumask_any_and_distribute(later_mask,
+							      sched_domain_span(sd));
 			/*
 			 * Last chance: if a CPU being in both later_mask
 			 * and current sd span is valid, that becomes our
@@ -2025,7 +2025,7 @@ static int find_later_rq(struct task_struct *task)
 	if (this_cpu != -1)
 		return this_cpu;
 
-	cpu = cpumask_any(later_mask);
+	cpu = cpumask_any_distribute(later_mask);
 	if (cpu < nr_cpu_ids)
 		return cpu;
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
@@ -1752,8 +1752,8 @@ static int find_lowest_rq(struct task_struct *task)
 				return this_cpu;
 			}
 
-			best_cpu = cpumask_first_and(lowest_mask,
-						     sched_domain_span(sd));
+			best_cpu = cpumask_any_and_distribute(lowest_mask,
+							      sched_domain_span(sd));
 			if (best_cpu < nr_cpu_ids) {
 				rcu_read_unlock();
 				return best_cpu;
@@ -1770,7 +1770,7 @@ static int find_lowest_rq(struct task_struct *task)
 	if (this_cpu != -1)
 		return this_cpu;
 
-	cpu = cpumask_any(lowest_mask);
+	cpu = cpumask_any_distribute(lowest_mask);
 	if (cpu < nr_cpu_ids)
 		return cpu;
 
diff --git a/lib/cpumask.c b/lib/cpumask.c
@@ -267,3 +267,21 @@ int cpumask_any_and_distribute(const struct cpumask *src1p,
 	return next;
 }
 EXPORT_SYMBOL(cpumask_any_and_distribute);
+
+int cpumask_any_distribute(const struct cpumask *srcp)
+{
+	int next, prev;
+
+	/* NOTE: our first selection will skip 0. */
+	prev = __this_cpu_read(distribute_cpu_mask_prev);
+
+	next = cpumask_next(prev, srcp);
+	if (next >= nr_cpu_ids)
+		next = cpumask_first(srcp);
+
+	if (next < nr_cpu_ids)
+		__this_cpu_write(distribute_cpu_mask_prev, next);
+
+	return next;
+}
+EXPORT_SYMBOL(cpumask_any_distribute);