smp, cpumask: Use non-atomic cpumask_{set,clear}_cpu()

The cpumasks in smp_call_function_many() are private and not subject
to concurrency; the atomic bitops are therefore pointless and expensive.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Peter Zijlstra, 2017-05-19 12:58:25 +02:00, committed by Ingo Molnar
parent 3fc5b3b6a8
commit 6c8557bdb2
2 changed files with 13 additions and 2 deletions

include/linux/cpumask.h

@@ -293,6 +293,12 @@ static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
 	set_bit(cpumask_check(cpu), cpumask_bits(dstp));
 }
 
+static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+{
+	__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
+}
+
 /**
  * cpumask_clear_cpu - clear a cpu in a cpumask
  * @cpu: cpu number (< nr_cpu_ids)
@@ -303,6 +309,11 @@ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
 	clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
 }
 
+static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
+{
+	__clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
+}
+
 /**
  * cpumask_test_cpu - test for a cpu in a cpumask
  * @cpu: cpu number (< nr_cpu_ids)

kernel/smp.c

@@ -436,7 +436,7 @@ void smp_call_function_many(const struct cpumask *mask,
 	cfd = this_cpu_ptr(&cfd_data);
 	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
-	cpumask_clear_cpu(this_cpu, cfd->cpumask);
+	__cpumask_clear_cpu(this_cpu, cfd->cpumask);
 
 	/* Some callers race with other cpus changing the passed mask */
 	if (unlikely(!cpumask_weight(cfd->cpumask)))
@@ -452,7 +452,7 @@ void smp_call_function_many(const struct cpumask *mask,
 		csd->func = func;
 		csd->info = info;
 		if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
-			cpumask_set_cpu(cpu, cfd->cpumask_ipi);
+			__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
 	}
 
 	/* Send a message to all CPUs in the map */
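
For contrast, a hedged sketch of the case the atomic variants remain
necessary for: a mask that several CPUs modify concurrently. The names
below (pending_mask, mark_self_pending) are illustrative and not taken
from this commit or from kernel/smp.c.

/* Illustrative sketch only -- not part of this commit. */
#include <linux/cpumask.h>
#include <linux/smp.h>

static struct cpumask pending_mask;	/* written by many CPUs at once */

static void mark_self_pending(void *unused)
{
	/*
	 * This can run concurrently on every CPU (e.g. via on_each_cpu()),
	 * all targeting the same shared mask, so the read-modify-write must
	 * stay atomic: cpumask_set_cpu() (set_bit), not __cpumask_set_cpu().
	 */
	cpumask_set_cpu(smp_processor_id(), &pending_mask);
}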