author		Steven Rostedt <rostedt@goodmis.org>	2025-05-05 17:21:11 -0400
committer	Steven Rostedt (Google) <rostedt@goodmis.org>	2025-05-09 15:19:10 -0400
commit		f62e3de375150210335a063605ce0dd6a6746b78 (patch)
tree		0e87a1091a81a6e8632ecc29124483d530de8de3
parent		a9839d204896c59b0e2ae08e571515b1cf752bd1 (diff)
ftrace: Do not disable function graph based on "disabled" field
The per CPU "disabled" value was the original way to disable tracing
when the tracing subsystem was first created. Today, the ring buffer
infrastructure has its own way to disable tracing. In fact, things have
changed so much since 2008 that many things ignore the disable flag.

Do not bother disabling the function graph tracer if the per CPU
disabled field is set. Just record as normal. If tracing is disabled in
the ring buffer it will not be recorded.

Also, when tracing is enabled again, it will not drop the return call
of the function.

Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: https://lore.kernel.org/20250505212235.715752008@goodmis.org
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
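For context, the ring buffer's own gate works roughly like this: a
writer reserves an event before filling it in, and the reserve fails
once recording has been disabled. A minimal sketch, assuming the
current ring_buffer_lock_reserve()/ring_buffer_unlock_commit() API;
sketch_write() is a hypothetical helper, not part of this patch:

#include <linux/ring_buffer.h>

/* Sketch only: shows why a tracer-side "disabled" check is redundant. */
static void sketch_write(struct trace_buffer *buffer, u64 value)
{
	struct ring_buffer_event *event;
	u64 *entry;

	/*
	 * If recording was stopped (e.g. via ring_buffer_record_disable()),
	 * the reserve returns NULL and the event is silently dropped, so
	 * the caller does not need its own "disabled" flag.
	 */
	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	*entry = value;
	ring_buffer_unlock_commit(buffer);
}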
-rw-r--r--	kernel/trace/trace_functions_graph.c	38
1 file changed, 9 insertions(+), 29 deletions(-)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 0c357a89c58e..9234e2c39abf 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -202,12 +202,9 @@ static int graph_entry(struct ftrace_graph_ent *trace,
 {
 	unsigned long *task_var = fgraph_get_task_var(gops);
 	struct trace_array *tr = gops->private;
-	struct trace_array_cpu *data;
 	struct fgraph_times *ftimes;
 	unsigned int trace_ctx;
-	long disabled;
 	int ret = 0;
-	int cpu;
 
 	if (*task_var & TRACE_GRAPH_NOTRACE)
 		return 0;
@@ -257,21 +254,14 @@ static int graph_entry(struct ftrace_graph_ent *trace,
 	if (tracing_thresh)
 		return 1;
 
-	preempt_disable_notrace();
-	cpu = raw_smp_processor_id();
-	data = per_cpu_ptr(tr->array_buffer.data, cpu);
-	disabled = atomic_read(&data->disabled);
-	if (likely(!disabled)) {
-		trace_ctx = tracing_gen_ctx();
-		if (IS_ENABLED(CONFIG_FUNCTION_GRAPH_RETADDR) &&
-		    tracer_flags_is_set(TRACE_GRAPH_PRINT_RETADDR)) {
-			unsigned long retaddr = ftrace_graph_top_ret_addr(current);
-			ret = __trace_graph_retaddr_entry(tr, trace, trace_ctx, retaddr);
-		} else {
-			ret = __graph_entry(tr, trace, trace_ctx, fregs);
-		}
+	trace_ctx = tracing_gen_ctx();
+	if (IS_ENABLED(CONFIG_FUNCTION_GRAPH_RETADDR) &&
+	    tracer_flags_is_set(TRACE_GRAPH_PRINT_RETADDR)) {
+		unsigned long retaddr = ftrace_graph_top_ret_addr(current);
+		ret = __trace_graph_retaddr_entry(tr, trace, trace_ctx, retaddr);
+	} else {
+		ret = __graph_entry(tr, trace, trace_ctx, fregs);
 	}
-	preempt_enable_notrace();
 
 	return ret;
 }
@@ -351,13 +341,10 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
 {
 	unsigned long *task_var = fgraph_get_task_var(gops);
 	struct trace_array *tr = gops->private;
-	struct trace_array_cpu *data;
 	struct fgraph_times *ftimes;
 	unsigned int trace_ctx;
 	u64 calltime, rettime;
-	long disabled;
 	int size;
-	int cpu;
 
 	rettime = trace_clock_local();
 
@@ -376,15 +363,8 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
 
 	calltime = ftimes->calltime;
 
-	preempt_disable_notrace();
-	cpu = raw_smp_processor_id();
-	data = per_cpu_ptr(tr->array_buffer.data, cpu);
-	disabled = atomic_read(&data->disabled);
-	if (likely(!disabled)) {
-		trace_ctx = tracing_gen_ctx();
-		__trace_graph_return(tr, trace, trace_ctx, calltime, rettime);
-	}
-	preempt_enable_notrace();
+	trace_ctx = tracing_gen_ctx();
+	__trace_graph_return(tr, trace, trace_ctx, calltime, rettime);
 }
 
 static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
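
With the per CPU field out of the hot path, pausing and resuming
collection goes through the ring buffer itself. A hedged usage sketch,
assuming ring_buffer_record_disable()/ring_buffer_record_enable();
sketch_pause_resume() is hypothetical, not from this patch:

#include <linux/ring_buffer.h>

/*
 * Sketch only: while recording is off, graph_entry() and
 * trace_graph_return() above still run, but their writes are dropped
 * inside the ring buffer. Because the return handler no longer bails
 * out early, re-enabling does not drop the return event of a function
 * that was entered while recording was paused.
 */
static void sketch_pause_resume(struct trace_buffer *buffer)
{
	ring_buffer_record_disable(buffer);	/* new events are dropped */
	/* ... */
	ring_buffer_record_enable(buffer);	/* recording resumes */
}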