author     Breno Leitao <leitao@debian.org>    2025-04-08 11:32:02 -0700
committer  Jakub Kicinski <kuba@kernel.org>    2025-04-10 18:34:05 -0700
commit     0f08335ade71273f89d19412268b48b55f3e3726
tree       5cd2ae7708d894c5823c0482c67db0be54ea697c
parent     b1e904999542ad6764eafa54545f1c55776006d1
trace: tcp: Add tracepoint for tcp_sendmsg_locked()
Add a tracepoint to monitor TCP send operations, enabling detailed
visibility into TCP message transmission.

Create a new tracepoint within the tcp_sendmsg_locked function,
capturing traditional fields along with size_goal, which indicates the
optimal data size for a single TCP segment. Additionally, a reference
to the struct sock sk is passed, allowing direct access for BPF
programs.

The implementation is largely based on David's patch[1] and suggestions.

Link: https://lore.kernel.org/all/70168c8f-bf52-4279-b4c4-be64527aa1ac@kernel.org/ [1]
Signed-off-by: Breno Leitao <leitao@debian.org>
Reviewed-by: David Ahern <dsahern@kernel.org>
Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20250408-tcpsendmsg-v3-2-208b87064c28@debian.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
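To illustrate the "direct access for BPF programs" point, a minimal sketch of a
consumer is shown below. It is not part of this patch: the program name, the
vmlinux.h/libbpf workflow, and the bpf_printk() output are assumptions. It
attaches through a BTF-enabled raw tracepoint (tp_btf), mirrors the TP_PROTO
added in the hunk further down, and treats skb as possibly NULL, which is what
the raw_tp_null_args entry in kernel/bpf/btf.c encodes.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sketch only, not part of this patch: attach to the new
 * tracepoint via a BTF-enabled raw tracepoint (tp_btf).
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("tp_btf/tcp_sendmsg_locked")
int BPF_PROG(trace_sendmsg_locked,
	     const struct sock *sk, const struct msghdr *msg,
	     const struct sk_buff *skb, int size_goal)
{
	/* sk is passed by reference, so its fields can be read directly. */
	__u16 sport = sk->__sk_common.skc_num;

	/* skb may be NULL (e.g. an empty write queue); the 0x100 entry added
	 * to raw_tp_null_args below marks it maybe-NULL, so the verifier
	 * requires this check before any dereference.
	 */
	bpf_printk("sport %u size_goal %d skb_len %u",
		   sport, size_goal, skb ? skb->len : 0);

	return 0;
}

Built into a skeleton and loaded with libbpf, the bpf_printk() output appears
in /sys/kernel/tracing/trace_pipe, much like the TP_printk format the
tracepoint itself emits once enabled via tracefs.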
-rw-r--r--  include/trace/events/tcp.h  24
-rw-r--r--  kernel/bpf/btf.c             1
-rw-r--r--  net/ipv4/tcp.c               2
3 files changed, 27 insertions, 0 deletions
diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h
index 1a40c41ff8c3..75d3d53a3832 100644
--- a/include/trace/events/tcp.h
+++ b/include/trace/events/tcp.h
@@ -259,6 +259,30 @@ TRACE_EVENT(tcp_retransmit_synack,
		  __entry->saddr_v6, __entry->daddr_v6)
);
+TRACE_EVENT(tcp_sendmsg_locked,
+	TP_PROTO(const struct sock *sk, const struct msghdr *msg,
+		 const struct sk_buff *skb, int size_goal),
+
+	TP_ARGS(sk, msg, skb, size_goal),
+
+	TP_STRUCT__entry(
+		__field(const void *, skb_addr)
+		__field(int, skb_len)
+		__field(int, msg_left)
+		__field(int, size_goal)
+	),
+
+	TP_fast_assign(
+		__entry->skb_addr = skb;
+		__entry->skb_len = skb ? skb->len : 0;
+		__entry->msg_left = msg_data_left(msg);
+		__entry->size_goal = size_goal;
+	),
+
+	TP_printk("skb_addr %p skb_len %d msg_left %d size_goal %d",
+		  __entry->skb_addr, __entry->skb_len, __entry->msg_left,
+		  __entry->size_goal));
+
DECLARE_TRACE(tcp_cwnd_reduction_tp,
	TP_PROTO(const struct sock *sk, int newly_acked_sacked,
		 int newly_lost, int flag),
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 16ba36f34dfa..24a26b4bb0b8 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -6541,6 +6541,7 @@ static const struct bpf_raw_tp_null_args raw_tp_null_args[] = {
{ "xprt_put_cong", 0x10 },
/* tcp */
{ "tcp_send_reset", 0x11 },
+ { "tcp_sendmsg_locked", 0x100 },
/* tegra_apb_dma */
{ "tegra_dma_tx_status", 0x100 },
/* timer_migration */
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6edc441b3702..e0e96f8fd47c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1160,6 +1160,8 @@ restart:
		if (skb)
			copy = size_goal - skb->len;
+		trace_tcp_sendmsg_locked(sk, msg, skb, size_goal);
+
		if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
			bool first_skb;