tracing: Replace synchronize_sched() and call_rcu_sched()

Now that synchronize_rcu() waits for preempt-disable regions of code
as well as RCU read-side critical sections, synchronize_sched() can
be replaced by synchronize_rcu().  Similarly, call_rcu_sched() can be
replaced by call_rcu().  This commit therefore makes these changes.
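
For illustration only (this sketch is not part of the patch, and the
my_obj names are made up), the usage pattern affected by this change
looks roughly like the following: a reader whose preempt-disable region
serves as its RCU read-side critical section, paired with an updater
that can now free old data with call_rcu() where call_rcu_sched() was
previously required.

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical example structure; not taken from the kernel tree. */
struct my_obj {
        int val;
        struct rcu_head rcu;
};

static struct my_obj __rcu *my_obj_ptr;
static DEFINE_SPINLOCK(my_obj_lock);

static void my_obj_free(struct rcu_head *head)
{
        kfree(container_of(head, struct my_obj, rcu));
}

/* Reader: the preempt-disable region is the read-side critical section. */
static int my_obj_read(void)
{
        struct my_obj *p;
        int val = -1;

        preempt_disable();
        p = rcu_dereference_sched(my_obj_ptr);
        if (p)
                val = p->val;
        preempt_enable();
        return val;
}

/* Updater: the consolidated grace period also waits for the reader above. */
static void my_obj_replace(struct my_obj *newp)
{
        struct my_obj *oldp;

        spin_lock(&my_obj_lock);
        oldp = rcu_dereference_protected(my_obj_ptr,
                                         lockdep_is_held(&my_obj_lock));
        rcu_assign_pointer(my_obj_ptr, newp);
        spin_unlock(&my_obj_lock);

        if (oldp)
                call_rcu(&oldp->rcu, my_obj_free);      /* was call_rcu_sched() */
}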

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: <linux-kernel@vger.kernel.org>
Acked-by: Steven Rostedt (VMware) <rostedt@goodmis.org>

commit 7440172974 (parent c93ffc15cc)
Author: Paul E. McKenney <paulmck@linux.ibm.com>
Date:   2018-11-06 18:44:52 -08:00

 7 files changed, 29 insertions(+), 29 deletions(-)

diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h

@@ -82,7 +82,7 @@ int unregister_tracepoint_module_notifier(struct notifier_block *nb)
 static inline void tracepoint_synchronize_unregister(void)
 {
         synchronize_srcu(&tracepoint_srcu);
-        synchronize_sched();
+        synchronize_rcu();
 }
 #else
 static inline void tracepoint_synchronize_unregister(void)

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c

@@ -173,7 +173,7 @@ static void ftrace_sync(struct work_struct *work)
 {
         /*
          * This function is just a stub to implement a hard force
-         * of synchronize_sched(). This requires synchronizing
+         * of synchronize_rcu(). This requires synchronizing
          * tasks even in userspace and idle.
          *
          * Yes, function tracing is rude.
@@ -934,7 +934,7 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
                         ftrace_profile_enabled = 0;
                         /*
                          * unregister_ftrace_profiler calls stop_machine
-                         * so this acts like an synchronize_sched.
+                         * so this acts like an synchronize_rcu.
                          */
                         unregister_ftrace_profiler();
                 }
@@ -1086,7 +1086,7 @@ struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
 
         /*
          * Some of the ops may be dynamically allocated,
-         * they are freed after a synchronize_sched().
+         * they are freed after a synchronize_rcu().
          */
         preempt_disable_notrace();
 
@@ -1286,7 +1286,7 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
 {
         if (!hash || hash == EMPTY_HASH)
                 return;
-        call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
+        call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
 }
 
 void ftrace_free_filter(struct ftrace_ops *ops)
@@ -1501,7 +1501,7 @@ static bool hash_contains_ip(unsigned long ip,
  * the ip is not in the ops->notrace_hash.
  *
  * This needs to be called with preemption disabled as
- * the hashes are freed with call_rcu_sched().
+ * the hashes are freed with call_rcu().
  */
 static int
 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
@@ -4496,7 +4496,7 @@ unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
         if (ftrace_enabled && !ftrace_hash_empty(hash))
                 ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
                                        &old_hash_ops);
-        synchronize_sched();
+        synchronize_rcu();
 
         hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
                 hlist_del(&entry->hlist);
@@ -5314,7 +5314,7 @@ ftrace_graph_release(struct inode *inode, struct file *file)
                 mutex_unlock(&graph_lock);
 
                 /* Wait till all users are no longer using the old hash */
-                synchronize_sched();
+                synchronize_rcu();
 
                 free_ftrace_hash(old_hash);
         }
@@ -5707,7 +5707,7 @@ void ftrace_release_mod(struct module *mod)
         list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
                 if (mod_map->mod == mod) {
                         list_del_rcu(&mod_map->list);
-                        call_rcu_sched(&mod_map->rcu, ftrace_free_mod_map);
+                        call_rcu(&mod_map->rcu, ftrace_free_mod_map);
                         break;
                 }
         }
@@ -5927,7 +5927,7 @@ ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
         struct ftrace_mod_map *mod_map;
         const char *ret = NULL;
 
-        /* mod_map is freed via call_rcu_sched() */
+        /* mod_map is freed via call_rcu() */
         preempt_disable();
         list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
                 ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
@@ -6262,7 +6262,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 
         /*
          * Some of the ops may be dynamically allocated,
-         * they must be freed after a synchronize_sched().
+         * they must be freed after a synchronize_rcu().
          */
         preempt_disable_notrace();
 
@@ -6433,7 +6433,7 @@ static void clear_ftrace_pids(struct trace_array *tr)
         rcu_assign_pointer(tr->function_pids, NULL);
 
         /* Wait till all users are no longer using pid filtering */
-        synchronize_sched();
+        synchronize_rcu();
 
         trace_free_pid_list(pid_list);
 }
@@ -6580,7 +6580,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
         rcu_assign_pointer(tr->function_pids, pid_list);
 
         if (filtered_pids) {
-                synchronize_sched();
+                synchronize_rcu();
                 trace_free_pid_list(filtered_pids);
         } else if (pid_list) {
                 /* Register a probe to set whether to ignore the tracing of a task */

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c

@@ -1834,7 +1834,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
                          * There could have been a race between checking
                          * record_disable and incrementing it.
                          */
-                        synchronize_sched();
+                        synchronize_rcu();
                         for_each_buffer_cpu(buffer, cpu) {
                                 cpu_buffer = buffer->buffers[cpu];
                                 rb_check_pages(cpu_buffer);
@@ -3151,7 +3151,7 @@ static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
  * This prevents all writes to the buffer. Any attempt to write
  * to the buffer after this will fail and return NULL.
  *
- * The caller should call synchronize_sched() after this.
+ * The caller should call synchronize_rcu() after this.
  */
 void ring_buffer_record_disable(struct ring_buffer *buffer)
 {
@@ -3253,7 +3253,7 @@ bool ring_buffer_record_is_set_on(struct ring_buffer *buffer)
  * This prevents all writes to the buffer. Any attempt to write
  * to the buffer after this will fail and return NULL.
  *
- * The caller should call synchronize_sched() after this.
+ * The caller should call synchronize_rcu() after this.
  */
 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
 {
@@ -4191,7 +4191,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
 void
 ring_buffer_read_prepare_sync(void)
 {
-        synchronize_sched();
+        synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
 
@@ -4363,7 +4363,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
         atomic_inc(&cpu_buffer->record_disabled);
 
         /* Make sure all commits have finished */
-        synchronize_sched();
+        synchronize_rcu();
 
         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
@@ -4496,7 +4496,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
                 goto out;
 
         /*
-         * We can't do a synchronize_sched here because this
+         * We can't do a synchronize_rcu here because this
          * function can be called in atomic context.
          * Normally this will be called from the same CPU as cpu.
          * If not it's up to the caller to protect this.

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c

@@ -1681,7 +1681,7 @@ void tracing_reset(struct trace_buffer *buf, int cpu)
         ring_buffer_record_disable(buffer);
 
         /* Make sure all commits have finished */
-        synchronize_sched();
+        synchronize_rcu();
         ring_buffer_reset_cpu(buffer, cpu);
 
         ring_buffer_record_enable(buffer);
@@ -1698,7 +1698,7 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
         ring_buffer_record_disable(buffer);
 
         /* Make sure all commits have finished */
-        synchronize_sched();
+        synchronize_rcu();
 
         buf->time_start = buffer_ftrace_now(buf, buf->cpu);
 
@@ -2250,7 +2250,7 @@ void trace_buffered_event_disable(void)
         preempt_enable();
 
         /* Wait for all current users to finish */
-        synchronize_sched();
+        synchronize_rcu();
 
         for_each_tracing_cpu(cpu) {
                 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
@@ -5398,7 +5398,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
         if (tr->current_trace->reset)
                 tr->current_trace->reset(tr);
 
-        /* Current trace needs to be nop_trace before synchronize_sched */
+        /* Current trace needs to be nop_trace before synchronize_rcu */
         tr->current_trace = &nop_trace;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
@@ -5412,7 +5412,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
                  * The update_max_tr is called from interrupts disabled
                  * so a synchronized_sched() is sufficient.
                  */
-                synchronize_sched();
+                synchronize_rcu();
                 free_snapshot(tr);
         }
 #endif

diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c

@@ -1614,7 +1614,7 @@ static int process_system_preds(struct trace_subsystem_dir *dir,
 
         /*
          * The calls can still be using the old filters.
-         * Do a synchronize_sched() and to ensure all calls are
+         * Do a synchronize_rcu() and to ensure all calls are
          * done with them before we free them.
          */
         tracepoint_synchronize_unregister();
@@ -1845,7 +1845,7 @@ int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
         if (filter) {
                 /*
                  * No event actually uses the system filter
-                 * we can free it without synchronize_sched().
+                 * we can free it without synchronize_rcu().
                  */
                 __free_filter(system->filter);
                 system->filter = filter;

diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c

@@ -333,7 +333,7 @@ disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
                  * event_call related objects, which will be accessed in
                  * the kprobe_trace_func/kretprobe_trace_func.
                  */
-                synchronize_sched();
+                synchronize_rcu();
                 kfree(link);    /* Ignored if link == NULL */
         }
 

diff --git a/kernel/trace/tracepoint.c b/kernel/trace/tracepoint.c
--- a/kernel/trace/tracepoint.c
+++ b/kernel/trace/tracepoint.c

@@ -92,7 +92,7 @@ static __init int release_early_probes(void)
         while (early_probes) {
                 tmp = early_probes;
                 early_probes = tmp->next;
-                call_rcu_sched(tmp, rcu_free_old_probes);
+                call_rcu(tmp, rcu_free_old_probes);
         }
 
         return 0;
@@ -123,7 +123,7 @@ static inline void release_probes(struct tracepoint_func *old)
                  * cover both cases. So let us chain the SRCU and sched RCU
                  * callbacks to wait for both grace periods.
                  */
-                call_rcu_sched(&tp_probes->rcu, rcu_free_old_probes);
+                call_rcu(&tp_probes->rcu, rcu_free_old_probes);
         }
 }
 