author     Linus Torvalds <torvalds@linux-foundation.org>  2025-01-23 17:59:25 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2025-01-23 17:59:25 -0800
commit     7c1badb2a9902ab4c5e9fe4093e532eeb11fd9fc (patch)
tree       aa654c40449b9455205d8ecfb48dd52ca2f511cf
parent     e8744fbc83188693f3590020b14d50df3387fc5a (diff)
parent     66611c0475709607f398e2a5d691b1fc72fe9dfc (diff)
Merge tag 'ftrace-v6.14-2' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace
Pull fgraph updates from Steven Rostedt:
"Remove calltime and rettime from fgraph infrastructure
The calltime and rettime fields were used by the function graph tracer to
calculate the timings of the functions whose entry and exit it traced. They
were stored in the generic structures underlying the entry and exit
callback mechanisms. Now that the function graph infrastructure is used by
subsystems other than just the tracer, calltime and rettime are not needed
by all of its users. Remove them from the generic fgraph infrastructure and
have the callers that require them handle them"
* tag 'ftrace-v6.14-2' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
fgraph: Remove calltime and rettime from generic operations
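In caller terms, the change replaces the timestamps that used to travel inside the generic struct ftrace_graph_ret with explicit arguments to __trace_graph_return(). A minimal before/after sketch, condensed from the trace_irqsoff.c hunk in the diff below (surrounding locals and error handling elided):

```c
/* Before: timings were carried inside the generic return structure. */
trace->calltime = *calltime;
__trace_graph_return(tr, trace, trace_ctx);

/* After: the tracer takes its own exit timestamp and passes both values
 * explicitly, so struct ftrace_graph_ret no longer needs timing fields. */
rettime = trace_clock_local();
__trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
```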
-rw-r--r--  include/linux/ftrace.h                |  2
-rw-r--r--  kernel/trace/fgraph.c                 |  1
-rw-r--r--  kernel/trace/trace.h                  |  4
-rw-r--r--  kernel/trace/trace_entries.h          |  8
-rw-r--r--  kernel/trace/trace_functions_graph.c  | 33
-rw-r--r--  kernel/trace/trace_irqsoff.c          |  5
-rw-r--r--  kernel/trace/trace_sched_wakeup.c     |  6

7 files changed, 33 insertions, 26 deletions
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 07092dfb21a4..fbabc3d848b3 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1151,8 +1151,6 @@ struct ftrace_graph_ret {
 	int depth;
 	/* Number of functions that overran the depth limit for current task */
 	unsigned int overrun;
-	unsigned long long calltime;
-	unsigned long long rettime;
 } __packed;
 
 struct fgraph_ops;
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 9e6b5a71555b..5dddfc2149f6 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -826,7 +826,6 @@ __ftrace_return_to_handler(struct ftrace_regs *fregs, unsigned long frame_pointer
 		return (unsigned long)panic;
 	}
 
-	trace.rettime = trace_clock_local();
 	if (fregs)
 		ftrace_regs_set_instruction_pointer(fregs, ret);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 2883dd6edd24..9c21ba45b7af 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -924,7 +924,9 @@ extern int __trace_graph_retaddr_entry(struct trace_array *tr,
 				       unsigned long retaddr);
 extern void __trace_graph_return(struct trace_array *tr,
 				 struct ftrace_graph_ret *trace,
-				 unsigned int trace_ctx);
+				 unsigned int trace_ctx,
+				 u64 calltime, u64 rettime);
+
 extern void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops);
 extern int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops);
 extern void free_fgraph_ops(struct trace_array *tr);
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index 82fd174ebbe0..fbfb396905a6 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -124,8 +124,8 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry,
 		__field_packed(	unsigned long,	ret,		retval	)
 		__field_packed(	int,		ret,		depth	)
 		__field_packed(	unsigned int,	ret,		overrun	)
-		__field_packed(	unsigned long long, ret,	calltime)
-		__field_packed(	unsigned long long, ret,	rettime	)
+		__field(unsigned long long,	calltime	)
+		__field(unsigned long long,	rettime		)
 	),
 
 	F_printk("<-- %ps (%d) (start: %llx end: %llx) over: %d retval: %lx",
@@ -146,8 +146,8 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry,
 		__field_packed(	unsigned long,	ret,		func	)
 		__field_packed(	int,		ret,		depth	)
 		__field_packed(	unsigned int,	ret,		overrun	)
-		__field_packed(	unsigned long long, ret,	calltime)
-		__field_packed(	unsigned long long, ret,	rettime	)
+		__field(unsigned long long,	calltime	)
+		__field(unsigned long long,	rettime		)
 	),
 
 	F_printk("<-- %ps (%d) (start: %llx end: %llx) over: %d",
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index dc62eb93837a..54d850997c0a 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -266,12 +266,10 @@ __trace_graph_function(struct trace_array *tr,
 	struct ftrace_graph_ret ret = {
 		.func     = ip,
 		.depth    = 0,
-		.calltime = time,
-		.rettime  = time,
 	};
 
 	__trace_graph_entry(tr, &ent, trace_ctx);
-	__trace_graph_return(tr, &ret, trace_ctx);
+	__trace_graph_return(tr, &ret, trace_ctx, time, time);
 }
 
 void
@@ -283,8 +281,9 @@ trace_graph_function(struct trace_array *tr,
 }
 
 void __trace_graph_return(struct trace_array *tr,
-			struct ftrace_graph_ret *trace,
-			unsigned int trace_ctx)
+			  struct ftrace_graph_ret *trace,
+			  unsigned int trace_ctx,
+			  u64 calltime, u64 rettime)
 {
 	struct ring_buffer_event *event;
 	struct trace_buffer *buffer = tr->array_buffer.buffer;
@@ -296,6 +295,8 @@ void __trace_graph_return(struct trace_array *tr,
 		return;
 	entry	= ring_buffer_event_data(event);
 	entry->ret			= *trace;
+	entry->calltime			= calltime;
+	entry->rettime			= rettime;
 	trace_buffer_unlock_commit_nostack(buffer, event);
 }
@@ -317,10 +318,13 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
 	struct trace_array_cpu *data;
 	struct fgraph_times *ftimes;
 	unsigned int trace_ctx;
+	u64 calltime, rettime;
 	long disabled;
 	int size;
 	int cpu;
 
+	rettime = trace_clock_local();
+
 	ftrace_graph_addr_finish(gops, trace);
 
 	if (*task_var & TRACE_GRAPH_NOTRACE) {
@@ -334,7 +338,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
 
 	handle_nosleeptime(trace, ftimes, size);
 
-	trace->calltime = ftimes->calltime;
+	calltime = ftimes->calltime;
 
 	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
@@ -342,7 +346,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
 	disabled = atomic_read(&data->disabled);
 	if (likely(!disabled)) {
 		trace_ctx = tracing_gen_ctx();
-		__trace_graph_return(tr, trace, trace_ctx);
+		__trace_graph_return(tr, trace, trace_ctx, calltime, rettime);
 	}
 	preempt_enable_notrace();
 }
@@ -367,10 +371,8 @@ static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
 
 	handle_nosleeptime(trace, ftimes, size);
 
-	trace->calltime = ftimes->calltime;
-
 	if (tracing_thresh &&
-	    (trace->rettime - ftimes->calltime < tracing_thresh))
+	    (trace_clock_local() - ftimes->calltime < tracing_thresh))
 		return;
 	else
 		trace_graph_return(trace, gops, fregs);
@@ -856,7 +858,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 
 	graph_ret = &ret_entry->ret;
 	call = &entry->graph_ent;
-	duration = graph_ret->rettime - graph_ret->calltime;
+	duration = ret_entry->rettime - ret_entry->calltime;
 
 	func = call->func + iter->tr->text_delta;
 
@@ -1137,11 +1139,14 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 }
 
 static enum print_line_t
-print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
+print_graph_return(struct ftrace_graph_ret_entry *retentry, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
 {
-	unsigned long long duration = trace->rettime - trace->calltime;
+	struct ftrace_graph_ret *trace = &retentry->ret;
+	u64 calltime = retentry->calltime;
+	u64 rettime = retentry->rettime;
+	unsigned long long duration = rettime - calltime;
 	struct fgraph_data *data = iter->private;
 	struct trace_array *tr = iter->tr;
 	unsigned long func;
@@ -1342,7 +1347,7 @@ print_graph_function_flags(struct trace_iterator *iter, u32 flags)
 	case TRACE_GRAPH_RET: {
 		struct ftrace_graph_ret_entry *field;
 		trace_assign_type(field, entry);
-		return print_graph_return(&field->ret, s, entry, iter, flags);
+		return print_graph_return(field, s, entry, iter, flags);
 	}
 	case TRACE_STACK:
 	case TRACE_FN:
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 08786c59d397..7294ad676379 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -223,6 +223,7 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace,
 	unsigned long flags;
 	unsigned int trace_ctx;
 	u64 *calltime;
+	u64 rettime;
 	int size;
 
 	ftrace_graph_addr_finish(gops, trace);
@@ -230,13 +231,13 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace,
 	if (!func_prolog_dec(tr, &data, &flags))
 		return;
 
+	rettime = trace_clock_local();
 	calltime = fgraph_retrieve_data(gops->idx, &size);
 	if (!calltime)
 		return;
 
-	trace->calltime = *calltime;
 	trace_ctx = tracing_gen_ctx_flags(flags);
-	__trace_graph_return(tr, trace, trace_ctx);
+	__trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
 	atomic_dec(&data->disabled);
 }
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index f372252dc8bb..af30586f1aea 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -158,6 +158,7 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace,
 	struct trace_array_cpu *data;
 	unsigned int trace_ctx;
 	u64 *calltime;
+	u64 rettime;
 	int size;
 
 	ftrace_graph_addr_finish(gops, trace);
@@ -165,12 +166,13 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace,
 	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
 		return;
 
+	rettime = trace_clock_local();
+
 	calltime = fgraph_retrieve_data(gops->idx, &size);
 	if (!calltime)
 		return;
 
-	trace->calltime = *calltime;
-	__trace_graph_return(tr, trace, trace_ctx);
+	__trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
 	atomic_dec(&data->disabled);
 	preempt_enable_notrace();
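For orientation, after this change the funcgraph_exit ring buffer entry implied by the trace_entries.h hunk above looks roughly like the sketch below, with the FTRACE_ENTRY_PACKED() field macros expanded by hand; treat the exact layout as an approximation, not the literal definition:

```c
struct ftrace_graph_ret_entry {
	struct trace_entry	ent;
	struct ftrace_graph_ret	ret;		/* func, depth, overrun; still __packed */
	unsigned long long	calltime;	/* now filled in by __trace_graph_return() */
	unsigned long long	rettime;	/* rather than living inside 'ret' */
};
```

Keeping the timestamps as ordinary __field() members of the entry, rather than packed fields of the generic return structure, is what lets print_graph_return() and print_graph_entry_leaf() read them via the entry (ret_entry->calltime/rettime) while other fgraph users pay nothing for timing they do not need.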