From: Chengkaitao <[email protected]>

When traversing an rbtree with bpf_rbtree_left/right, exchanging the
__kptr pointer stored in a node via bpf_kptr_xchg currently requires
first removing the node with bpf_rbtree_remove, which clears its
NON_OWN_REF flag, and then re-adding it to the original rbtree with
bpf_rbtree_add once the exchange is done. This round trip
significantly degrades rbtree traversal performance.

Allow bpf_kptr_xchg to accept a destination that still carries the
NON_OWN_REF flag while the lock is held, eliminating the need for
this remove/exchange/re-add sequence.
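
For illustration only (not part of the change), the intended usage looks
roughly like the selftest-style sketch below. All type, field and section
names are hypothetical, the usual selftest headers (vmlinux.h,
bpf_helpers.h, bpf_experimental.h) are assumed, and it is a sketch rather
than a tested program:

  /* Hypothetical node layout: a local kptr stashed inside an rbtree node. */
  struct payload {
          long data;
  };

  struct node_data {
          long key;
          struct payload __kptr *stash;   /* exchanged via bpf_kptr_xchg() */
          struct bpf_rb_node node;
  };

  /* Fallback in case the assumed headers do not already provide it. */
  #ifndef container_of
  #define container_of(ptr, type, member)                                 \
          ((type *)((void *)(ptr) - offsetof(type, member)))
  #endif

  #define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))

  private(A) struct bpf_spin_lock glock;
  private(A) struct bpf_rb_root groot __contains(node_data, node);

  SEC("tc")
  int stash_in_first_node(void *ctx)
  {
          struct payload *new_val, *old_val;
          struct bpf_rb_node *rb;
          struct node_data *n;

          new_val = bpf_obj_new(typeof(*new_val));
          if (!new_val)
                  return 0;

          bpf_spin_lock(&glock);
          rb = bpf_rbtree_first(&groot);   /* tree populated elsewhere */
          if (!rb) {
                  bpf_spin_unlock(&glock);
                  bpf_obj_drop(new_val);
                  return 0;
          }
          /* 'n' is a non-owning (NON_OWN_REF) reference: the node stays in
           * the tree. With this patch it is accepted as a bpf_kptr_xchg
           * destination, so no bpf_rbtree_remove()/bpf_rbtree_add() round
           * trip is needed around the exchange.
           */
          n = container_of(rb, struct node_data, node);
          old_val = bpf_kptr_xchg(&n->stash, new_val);
          bpf_spin_unlock(&glock);

          if (old_val)
                  bpf_obj_drop(old_val);
          return 0;
  }

  char _license[] SEC("license") = "GPL";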

Signed-off-by: Chengkaitao <[email protected]>
Signed-off-by: Feng Yang <[email protected]>
---
 kernel/bpf/verifier.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 05a6a6606b6c..bb3ff4bbb3a2 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -9260,7 +9260,8 @@ static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE }
 static const struct bpf_reg_types kptr_xchg_dest_types = {
        .types = {
                PTR_TO_MAP_VALUE,
-               PTR_TO_BTF_ID | MEM_ALLOC
+               PTR_TO_BTF_ID | MEM_ALLOC,
+               PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF
        }
 };
 static const struct bpf_reg_types dynptr_types = {
@@ -9420,6 +9421,7 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
        }
        case PTR_TO_BTF_ID | MEM_ALLOC:
        case PTR_TO_BTF_ID | MEM_PERCPU | MEM_ALLOC:
+       case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF:
                if (meta->func_id != BPF_FUNC_spin_lock && meta->func_id != BPF_FUNC_spin_unlock &&
                    meta->func_id != BPF_FUNC_kptr_xchg) {
                        verifier_bug(env, "unimplemented handling of MEM_ALLOC");
                        return -EFAULT;
-- 
2.50.1 (Apple Git-155)

