From: Vineeth Pillai <[email protected]>

Replace trace_foo() with the new trace_call__foo() at sites already
guarded by trace_foo_enabled(), avoiding a redundant
static_branch_unlikely() re-evaluation inside the tracepoint.
trace_call__foo() calls the tracepoint callbacks directly without
utilizing the static branch again.

Original v2 series:
https://lore.kernel.org/linux-trace-kernel/[email protected]/

Parts of the original v2 series have already been merged in mainline.
This patch is being reposted as a follow-up cleanup for the remaining
unmerged pieces.

Suggested-by: Steven Rostedt <[email protected]>
Suggested-by: Peter Zijlstra <[email protected]>
Signed-off-by: Vineeth Pillai (Google) <[email protected]>
Assisted-by: Claude:claude-sonnet-4-6
---
 net/core/dev.c             | 2 +-
 net/core/xdp.c             | 2 +-
 net/openvswitch/actions.c  | 2 +-
 net/openvswitch/datapath.c | 2 +-
 net/sctp/outqueue.c        | 2 +-
 net/tipc/node.c            | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/net/core/dev.c b/net/core/dev.c
index 8bfa8313ef62..12a583ce4d95 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6482,7 +6482,7 @@ void netif_receive_skb_list(struct list_head *head)
                return;
        if (trace_netif_receive_skb_list_entry_enabled()) {
                list_for_each_entry(skb, head, list)
-                       trace_netif_receive_skb_list_entry(skb);
+                       trace_call__netif_receive_skb_list_entry(skb);
        }
        netif_receive_skb_list_internal(head);
        trace_netif_receive_skb_list_exit(0);
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 9890a30584ba..3003e5c57419 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -362,7 +362,7 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
                xsk_pool_set_rxq_info(allocator, xdp_rxq);
 
        if (trace_mem_connect_enabled() && xdp_alloc)
-               trace_mem_connect(xdp_alloc, xdp_rxq);
+               trace_call__mem_connect(xdp_alloc, xdp_rxq);
        return 0;
 }
 
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 140388a18ae0..7b7c93c3bde4 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -1260,7 +1260,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                int err = 0;
 
                if (trace_ovs_do_execute_action_enabled())
-                       trace_ovs_do_execute_action(dp, skb, key, a, rem);
+                       trace_call__ovs_do_execute_action(dp, skb, key, a, rem);
 
                /* Actions that rightfully have to consume the skb should do it
                 * and return directly.
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index bbbde50fc649..f2b6688f18d6 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -335,7 +335,7 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
        int err;
 
        if (trace_ovs_dp_upcall_enabled())
-               trace_ovs_dp_upcall(dp, skb, key, upcall_info);
+               trace_call__ovs_dp_upcall(dp, skb, key, upcall_info);
 
        if (upcall_info->portid == 0) {
                err = -ENOTCONN;
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index f6b8c13dafa4..4025d863ffc8 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1267,7 +1267,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
        /* SCTP path tracepoint for congestion control debugging. */
        if (trace_sctp_probe_path_enabled()) {
                list_for_each_entry(transport, transport_list, transports)
-                       trace_sctp_probe_path(transport, asoc);
+                       trace_call__sctp_probe_path(transport, asoc);
        }
 
        sack_ctsn = ntohl(sack->cum_tsn_ack);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 97aa970a0d83..6cfe4c40c82b 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1943,7 +1943,7 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
 
        if (trace_tipc_node_check_state_enabled()) {
                trace_tipc_skb_dump(skb, false, "skb for node state check");
-               trace_tipc_node_check_state(n, true, " ");
+               trace_call__tipc_node_check_state(n, true, " ");
        }
        l = n->links[bearer_id].link;
        if (!l)
-- 
2.54.0


Reply via email to