ftrace: Introduce PERMANENT ftrace_ops flag
Livepatch uses ftrace for redirection to new patched functions. It means that if ftrace is disabled, all live patched functions are disabled as well. Toggling the global 'ftrace_enabled' sysctl thus affects it directly. It is not a problem per se, because only an administrator can set sysctl values, but it still may be surprising. Introduce the PERMANENT ftrace_ops flag to amend this. If FTRACE_OPS_FL_PERMANENT is set on any ftrace ops, the tracing cannot be disabled by disabling ftrace_enabled. Equally, a callback with the flag set cannot be registered if ftrace_enabled is disabled. Link: http://lkml.kernel.org/r/20191016113316.13415-2-mbenes@suse.cz Reviewed-by: Petr Mladek <pmladek@suse.com> Reviewed-by: Kamalesh Babulal <kamalesh@linux.vnet.ibm.com> Signed-off-by: Miroslav Benes <mbenes@suse.cz> Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
This commit is contained in:
parent
a99d8080aa
commit
7162431dcf
|
@ -170,6 +170,14 @@ FTRACE_OPS_FL_RCU
|
|||
a callback may be executed and RCU synchronization will not protect
|
||||
it.
|
||||
|
||||
FTRACE_OPS_FL_PERMANENT
|
||||
If this is set on any ftrace ops, then the tracing cannot be disabled by
|
||||
writing 0 to the proc sysctl ftrace_enabled. Equally, a callback with
|
||||
the flag set cannot be registered if ftrace_enabled is 0.
|
||||
|
||||
Livepatch uses it not to lose the function redirection, so the system
|
||||
stays protected.
|
||||
|
||||
|
||||
Filtering which functions to trace
|
||||
==================================
|
||||
|
|
|
@ -2976,7 +2976,9 @@ Note, the proc sysctl ftrace_enabled is a big on/off switch for the
|
|||
function tracer. By default it is enabled (when function tracing is
|
||||
enabled in the kernel). If it is disabled, all function tracing is
|
||||
disabled. This includes not only the function tracers for ftrace, but
|
||||
also for any other uses (perf, kprobes, stack tracing, profiling, etc).
|
||||
also for any other uses (perf, kprobes, stack tracing, profiling, etc). It
|
||||
cannot be disabled if there is a callback with FTRACE_OPS_FL_PERMANENT set
|
||||
registered.
|
||||
|
||||
Please disable this with care.
|
||||
|
||||
|
|
|
@ -142,6 +142,8 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
|
|||
* PID - Is affected by set_ftrace_pid (allows filtering on those pids)
|
||||
* RCU - Set when the ops can only be called when RCU is watching.
|
||||
* TRACE_ARRAY - The ops->private points to a trace_array descriptor.
|
||||
* PERMANENT - Set when the ops is permanent and should not be affected by
|
||||
* ftrace_enabled.
|
||||
*/
|
||||
enum {
|
||||
FTRACE_OPS_FL_ENABLED = 1 << 0,
|
||||
|
@ -160,6 +162,7 @@ enum {
|
|||
FTRACE_OPS_FL_PID = 1 << 13,
|
||||
FTRACE_OPS_FL_RCU = 1 << 14,
|
||||
FTRACE_OPS_FL_TRACE_ARRAY = 1 << 15,
|
||||
FTRACE_OPS_FL_PERMANENT = 1 << 16,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_DYNAMIC_FTRACE
|
||||
|
|
|
@ -196,7 +196,8 @@ static int klp_patch_func(struct klp_func *func)
|
|||
ops->fops.func = klp_ftrace_handler;
|
||||
ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
|
||||
FTRACE_OPS_FL_DYNAMIC |
|
||||
FTRACE_OPS_FL_IPMODIFY;
|
||||
FTRACE_OPS_FL_IPMODIFY |
|
||||
FTRACE_OPS_FL_PERMANENT;
|
||||
|
||||
list_add(&ops->node, &klp_ops);
|
||||
|
||||
|
|
|
@ -326,6 +326,8 @@ int __register_ftrace_function(struct ftrace_ops *ops)
|
|||
if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
|
||||
ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
|
||||
#endif
|
||||
if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
|
||||
return -EBUSY;
|
||||
|
||||
if (!core_kernel_data((unsigned long)ops))
|
||||
ops->flags |= FTRACE_OPS_FL_DYNAMIC;
|
||||
|
@ -6754,6 +6756,18 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(unregister_ftrace_function);
|
||||
|
||||
static bool is_permanent_ops_registered(void)
|
||||
{
|
||||
struct ftrace_ops *op;
|
||||
|
||||
do_for_each_ftrace_op(op, ftrace_ops_list) {
|
||||
if (op->flags & FTRACE_OPS_FL_PERMANENT)
|
||||
return true;
|
||||
} while_for_each_ftrace_op(op);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
int
|
||||
ftrace_enable_sysctl(struct ctl_table *table, int write,
|
||||
void __user *buffer, size_t *lenp,
|
||||
|
@ -6771,8 +6785,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
|
|||
if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
|
||||
goto out;
|
||||
|
||||
last_ftrace_enabled = !!ftrace_enabled;
|
||||
|
||||
if (ftrace_enabled) {
|
||||
|
||||
/* we are starting ftrace again */
|
||||
|
@ -6783,12 +6795,19 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
|
|||
ftrace_startup_sysctl();
|
||||
|
||||
} else {
|
||||
if (is_permanent_ops_registered()) {
|
||||
ftrace_enabled = true;
|
||||
ret = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* stopping ftrace calls (just send to ftrace_stub) */
|
||||
ftrace_trace_function = ftrace_stub;
|
||||
|
||||
ftrace_shutdown_sysctl();
|
||||
}
|
||||
|
||||
last_ftrace_enabled = !!ftrace_enabled;
|
||||
out:
|
||||
mutex_unlock(&ftrace_lock);
|
||||
return ret;
|
||||
|
|
Loading…
Reference in New Issue