Skip to content

Commit 4740974

Browse files
Steven Rostedt (rostedt)
authored and committed
ftrace: Add default recursion protection for function tracing
As more users of the function tracer utility are being added, they do not always add the necessary recursion protection. To protect from function recursion due to tracing, if the callback ftrace_ops does not specifically specify that it protects against recursion (by setting the FTRACE_OPS_FL_RECURSION_SAFE flag), the list operation will be called by the mcount trampoline which adds recursion protection. If the flag is set, then the function will be called directly with no extra protection. Note, the list operation is called if more than one function callback is registered, or if the arch does not support all of the function tracer features. Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
1 parent 5767cfe commit 4740974

8 files changed

Lines changed: 24 additions & 8 deletions

File tree

include/linux/ftrace.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -85,6 +85,10 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
8585
* passing regs to the handler.
8686
* Note, if this flag is set, the SAVE_REGS flag will automatically
8787
* get set upon registering the ftrace_ops, if the arch supports it.
88+
* RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
89+
* that the call back has its own recursion protection. If it does
90+
* not set this, then the ftrace infrastructure will add recursion
91+
* protection for the caller.
8892
*/
8993
enum {
9094
FTRACE_OPS_FL_ENABLED = 1 << 0,
@@ -93,6 +97,7 @@ enum {
9397
FTRACE_OPS_FL_CONTROL = 1 << 3,
9498
FTRACE_OPS_FL_SAVE_REGS = 1 << 4,
9599
FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5,
100+
FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6,
96101
};
97102

98103
struct ftrace_ops {

kernel/trace/ftrace.c

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -66,6 +66,7 @@
6666

6767
static struct ftrace_ops ftrace_list_end __read_mostly = {
6868
.func = ftrace_stub,
69+
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
6970
};
7071

7172
/* ftrace_enabled is a method to turn ftrace on or off */
@@ -221,12 +222,13 @@ static void update_ftrace_function(void)
221222

222223
/*
223224
* If we are at the end of the list and this ops is
224-
* not dynamic and the arch supports passing ops, then have the
225-
* mcount trampoline call the function directly.
225+
* recursion safe and not dynamic and the arch supports passing ops,
226+
* then have the mcount trampoline call the function directly.
226227
*/
227228
if (ftrace_ops_list == &ftrace_list_end ||
228229
(ftrace_ops_list->next == &ftrace_list_end &&
229230
!(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
231+
(ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
230232
!FTRACE_FORCE_LIST_FUNC)) {
231233
/* Set the ftrace_ops that the arch callback uses */
232234
if (ftrace_ops_list == &global_ops)
@@ -867,6 +869,7 @@ static void unregister_ftrace_profiler(void)
867869
#else
868870
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
869871
.func = function_profile_call,
872+
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
870873
};
871874

872875
static int register_ftrace_profiler(void)
@@ -1049,6 +1052,7 @@ static struct ftrace_ops global_ops = {
10491052
.func = ftrace_stub,
10501053
.notrace_hash = EMPTY_HASH,
10511054
.filter_hash = EMPTY_HASH,
1055+
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
10521056
};
10531057

10541058
static DEFINE_MUTEX(ftrace_regex_lock);
@@ -3967,6 +3971,7 @@ void __init ftrace_init(void)
39673971

39683972
static struct ftrace_ops global_ops = {
39693973
.func = ftrace_stub,
3974+
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
39703975
};
39713976

39723977
static int __init ftrace_nodyn_init(void)
@@ -4023,6 +4028,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
40234028

40244029
static struct ftrace_ops control_ops = {
40254030
.func = ftrace_ops_control_func,
4031+
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
40264032
};
40274033

40284034
static inline void

kernel/trace/trace_events.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1721,6 +1721,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
17211721
static struct ftrace_ops trace_ops __initdata =
17221722
{
17231723
.func = function_test_events_call,
1724+
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
17241725
};
17251726

17261727
static __init void event_trace_self_test_with_function(void)

kernel/trace/trace_functions.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -153,13 +153,13 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
153153
static struct ftrace_ops trace_ops __read_mostly =
154154
{
155155
.func = function_trace_call,
156-
.flags = FTRACE_OPS_FL_GLOBAL,
156+
.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
157157
};
158158

159159
static struct ftrace_ops trace_stack_ops __read_mostly =
160160
{
161161
.func = function_stack_trace_call,
162-
.flags = FTRACE_OPS_FL_GLOBAL,
162+
.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
163163
};
164164

165165
/* Our two options */

kernel/trace/trace_irqsoff.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -154,7 +154,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
154154
static struct ftrace_ops trace_ops __read_mostly =
155155
{
156156
.func = irqsoff_tracer_call,
157-
.flags = FTRACE_OPS_FL_GLOBAL,
157+
.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
158158
};
159159
#endif /* CONFIG_FUNCTION_TRACER */
160160

kernel/trace/trace_sched_wakeup.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -130,7 +130,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
130130
static struct ftrace_ops trace_ops __read_mostly =
131131
{
132132
.func = wakeup_tracer_call,
133-
.flags = FTRACE_OPS_FL_GLOBAL,
133+
.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
134134
};
135135
#endif /* CONFIG_FUNCTION_TRACER */
136136

kernel/trace/trace_selftest.c

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -148,19 +148,22 @@ static void trace_selftest_test_dyn_func(unsigned long ip,
148148

149149
static struct ftrace_ops test_probe1 = {
150150
.func = trace_selftest_test_probe1_func,
151+
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
151152
};
152153

153154
static struct ftrace_ops test_probe2 = {
154155
.func = trace_selftest_test_probe2_func,
156+
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
155157
};
156158

157159
static struct ftrace_ops test_probe3 = {
158160
.func = trace_selftest_test_probe3_func,
161+
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
159162
};
160163

161164
static struct ftrace_ops test_global = {
162-
.func = trace_selftest_test_global_func,
163-
.flags = FTRACE_OPS_FL_GLOBAL,
165+
.func = trace_selftest_test_global_func,
166+
.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
164167
};
165168

166169
static void print_counts(void)

kernel/trace/trace_stack.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -137,6 +137,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
137137
static struct ftrace_ops trace_ops __read_mostly =
138138
{
139139
.func = stack_trace_call,
140+
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
140141
};
141142

142143
static ssize_t

0 commit comments

Comments (0)