From: Feng Yang <[email protected]>

All BPF programs run with either CPU preemption or CPU migration disabled, so smp_processor_id() and raw_smp_processor_id() return the same value in BPF context. The bpf_get_smp_processor_id_proto can therefore be safely removed; the bpf_get_raw_smp_processor_id_proto returned by bpf_base_func_proto() serves as its replacement.
Suggested-by: Andrii Nakryiko <[email protected]> Signed-off-by: Feng Yang <[email protected]> --- include/linux/bpf.h | 1 - kernel/bpf/core.c | 1 - kernel/bpf/helpers.c | 12 ------------ kernel/trace/bpf_trace.c | 2 -- net/core/filter.c | 6 ------ 5 files changed, 22 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 3f0cc89c0622..36e525141556 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -3316,7 +3316,6 @@ extern const struct bpf_func_proto bpf_map_peek_elem_proto; extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto; extern const struct bpf_func_proto bpf_get_prandom_u32_proto; -extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; extern const struct bpf_func_proto bpf_get_numa_node_id_proto; extern const struct bpf_func_proto bpf_tail_call_proto; extern const struct bpf_func_proto bpf_ktime_get_ns_proto; diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index ba6b6118cf50..1ad41a16b86e 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2943,7 +2943,6 @@ const struct bpf_func_proto bpf_spin_unlock_proto __weak; const struct bpf_func_proto bpf_jiffies64_proto __weak; const struct bpf_func_proto bpf_get_prandom_u32_proto __weak; -const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak; const struct bpf_func_proto bpf_get_numa_node_id_proto __weak; const struct bpf_func_proto bpf_ktime_get_ns_proto __weak; const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak; diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index e3a2662f4e33..2d2bfb2911f8 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -149,18 +149,6 @@ const struct bpf_func_proto bpf_get_prandom_u32_proto = { .ret_type = RET_INTEGER, }; -BPF_CALL_0(bpf_get_smp_processor_id) -{ - return smp_processor_id(); -} - -const struct bpf_func_proto bpf_get_smp_processor_id_proto = { - .func = bpf_get_smp_processor_id, - .gpl_only = false, - .ret_type = RET_INTEGER, - .allow_fastcall = true, -}; - 
BPF_CALL_0(bpf_get_numa_node_id) { return numa_node_id(); diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 0f5906f43d7c..39360cd6baf1 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1462,8 +1462,6 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_get_current_comm_proto; case BPF_FUNC_trace_printk: return bpf_get_trace_printk_proto(); - case BPF_FUNC_get_smp_processor_id: - return &bpf_get_smp_processor_id_proto; case BPF_FUNC_get_numa_node_id: return &bpf_get_numa_node_id_proto; case BPF_FUNC_perf_event_read: diff --git a/net/core/filter.c b/net/core/filter.c index bc6828761a47..7f7ec913ddbc 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -8264,8 +8264,6 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_set_hash_proto; case BPF_FUNC_perf_event_output: return &bpf_skb_event_output_proto; - case BPF_FUNC_get_smp_processor_id: - return &bpf_get_smp_processor_id_proto; case BPF_FUNC_skb_under_cgroup: return &bpf_skb_under_cgroup_proto; case BPF_FUNC_get_socket_cookie: @@ -8343,8 +8341,6 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) switch (func_id) { case BPF_FUNC_perf_event_output: return &bpf_xdp_event_output_proto; - case BPF_FUNC_get_smp_processor_id: - return &bpf_get_smp_processor_id_proto; case BPF_FUNC_csum_diff: return &bpf_csum_diff_proto; case BPF_FUNC_xdp_adjust_head: @@ -8570,8 +8566,6 @@ lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_get_hash_recalc_proto; case BPF_FUNC_perf_event_output: return &bpf_skb_event_output_proto; - case BPF_FUNC_get_smp_processor_id: - return &bpf_get_smp_processor_id_proto; case BPF_FUNC_skb_under_cgroup: return &bpf_skb_under_cgroup_proto; default: -- 2.39.0.windows.2
