From: Kaitao Cheng <[email protected]>

Extend the refcounted_kptr selftests: insert a node into both an rbtree and
a linked list, remove it from the rbtree to obtain an owned pointer to it,
add a second node after it in the list with bpf_list_add, and finally remove
both nodes from the list with bpf_list_del.
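For context, the intended usage of the new kfuncs is roughly the following
sketch (illustrative only, not part of the diff; it reuses the 'head', 'lock'
and struct node_data definitions already present in refcounted_kptr.c, and
assumes 'first' is already linked in the list while 'second' was just
allocated with bpf_obj_new; error handling and the remaining bpf_obj_drop
calls are elided, the new selftests show the full pattern):

  struct bpf_list_node *removed;

  bpf_spin_lock(&lock);
  /* link 'second' directly after 'first'; 'first' must already be linked */
  if (bpf_list_add(&head, &second->l, &first->l)) {
          bpf_spin_unlock(&lock);
          return -1;
  }
  /* unlink 'first'; the caller receives ownership of the removed node */
  removed = bpf_list_del(&head, &first->l);
  bpf_spin_unlock(&lock);
  if (removed)
          bpf_obj_drop(container_of(removed, struct node_data, l));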
The test asserts that the list is non-empty after the initial insert, that
the first and last list nodes are the expected ones after bpf_list_add
(checked with bpf_list_node_is_edge), and that the list is empty once both
nodes have been removed. To validate the locking requirements of
bpf_list_add/bpf_list_del, two additional programs expect the verifier to
reject calls made without holding the bpf_spin_lock protecting the list.

Signed-off-by: Kaitao Cheng <[email protected]>
---
 .../testing/selftests/bpf/bpf_experimental.h |  39 ++++
 .../selftests/bpf/progs/refcounted_kptr.c    | 182 ++++++++++++++++++
 2 files changed, 221 insertions(+)

diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index 4b7210c318dd..d5f42ed69166 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -99,6 +99,45 @@ extern struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ks
  */
 extern struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym;
 
+/* Description
+ *	Remove 'node' from the BPF linked list with head 'head'.
+ *	The node must be in the list, and the list's bpf_spin_lock must be held.
+ *	The caller owns the removed node and must release it with bpf_obj_drop.
+ * Returns
+ *	Pointer to the removed bpf_list_node, or NULL if 'node' is NULL
+ *	or not in the list.
+ */
+extern struct bpf_list_node *bpf_list_del(struct bpf_list_head *head,
+					  struct bpf_list_node *node) __ksym;
+
+/* Description
+ *	Insert 'new' after 'prev' in the BPF linked list with head 'head'.
+ *	The bpf_spin_lock protecting the list must be held. 'prev' must already
+ *	be in that list; 'new' must not be in any list. The 'meta' and 'off'
+ *	parameters are rewritten by the verifier, no need for BPF programs to
+ *	set them.
+ * Returns
+ *	0 on success, -EINVAL if 'head' is NULL, 'prev' is not in the list
+ *	with head 'head', or 'new' is already in a list.
+ */
+extern int bpf_list_add_impl(struct bpf_list_head *head, struct bpf_list_node *new,
+			     struct bpf_list_node *prev, void *meta, __u64 off) __ksym;
+
+/* Convenience macro to wrap over bpf_list_add_impl */
+#define bpf_list_add(head, new, prev) bpf_list_add_impl(head, new, prev, NULL, 0)
+
+/* Description
+ *	Return true if 'node' is the first (when 'is_first' is true) or the last
+ *	(when 'is_first' is false) node in the list with head 'head'.
+ */
+extern bool bpf_list_node_is_edge(struct bpf_list_head *head,
+				  struct bpf_list_node *node, bool is_first) __ksym;
+
+/* Description
+ *	Return true if the list with head 'head' has no entries.
+ */
+extern bool bpf_list_empty(struct bpf_list_head *head) __ksym;
+
 /* Description
  *	Remove 'node' from rbtree with root 'root'
  * Returns
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr.c b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
index 1aca85d86aeb..aca201f9fb56 100644
--- a/tools/testing/selftests/bpf/progs/refcounted_kptr.c
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
@@ -367,6 +367,188 @@ long insert_rbtree_and_stash__del_tree_##rem_tree(void *ctx) \
 INSERT_STASH_READ(true, "insert_stash_read: remove from tree");
 INSERT_STASH_READ(false, "insert_stash_read: don't remove from tree");
 
+/* Insert one node in the tree and the list, remove it from the tree, add a
+ * second node after it with bpf_list_add, check bpf_list_node_is_edge/empty,
+ * then remove both nodes from the list via bpf_list_del.
+ */
+SEC("tc")
+__description("list_add_del_and_check: test bpf_list_add/del/is_first/is_last/empty")
+__success __retval(0)
+long list_add_del_and_check(void *ctx)
+{
+	long err = 0;
+	struct bpf_rb_node *rb;
+	struct bpf_list_node *l, *l_1;
+	struct node_data *n, *n_1, *m_1;
+
+	err = __insert_in_tree_and_list(&head, &root, &lock);
+	if (err)
+		return err;
+
+	bpf_spin_lock(&lock);
+	if (bpf_list_empty(&head)) {
+		bpf_spin_unlock(&lock);
+		return -7;
+	}
+
+	rb = bpf_rbtree_first(&root);
+	if (!rb) {
+		bpf_spin_unlock(&lock);
+		return -4;
+	}
+
+	rb = bpf_rbtree_remove(&root, rb);
+	bpf_spin_unlock(&lock);
+	if (!rb)
+		return -5;
+
+	n = container_of(rb, struct node_data, r);
+	n_1 = bpf_obj_new(typeof(*n_1));
+	if (!n_1) {
+		bpf_obj_drop(n);
+		return -1;
+	}
+	m_1 = bpf_refcount_acquire(n_1);
+	if (!m_1) {
+		bpf_obj_drop(n);
+		bpf_obj_drop(n_1);
+		return -1;
+	}
+
+	bpf_spin_lock(&lock);
+	if (bpf_list_add(&head, &n_1->l, &n->l)) {
+		bpf_spin_unlock(&lock);
+		bpf_obj_drop(n);
+		bpf_obj_drop(m_1);
+		return -8;
+	}
+
+	if (!bpf_list_node_is_edge(&head, &n->l, true) ||
+	    !bpf_list_node_is_edge(&head, &m_1->l, false)) {
+		bpf_spin_unlock(&lock);
+		bpf_obj_drop(n);
+		bpf_obj_drop(m_1);
+		return -9;
+	}
+
+	l = bpf_list_del(&head, &n->l);
+	l_1 = bpf_list_del(&head, &m_1->l);
+	bpf_spin_unlock(&lock);
+	bpf_obj_drop(n);
+	bpf_obj_drop(m_1);
+
+	if (l)
+		bpf_obj_drop(container_of(l, struct node_data, l));
+	else
+		err = -6;
+
+	if (l_1)
+		bpf_obj_drop(container_of(l_1, struct node_data, l));
+	else
+		err = -6;
+
+	bpf_spin_lock(&lock);
+	if (!bpf_list_empty(&head))
+		err = -7;
+	bpf_spin_unlock(&lock);
+	return err;
+}
+
+SEC("?tc")
+__failure __msg("bpf_spin_lock at off=32 must be held for bpf_list_head")
+long list_del_without_lock_fail(void *ctx)
+{
+	struct bpf_rb_node *rb;
+	struct bpf_list_node *l;
+	struct node_data *n;
+
+	bpf_spin_lock(&lock);
+	rb = bpf_rbtree_first(&root);
+	if (!rb) {
+		bpf_spin_unlock(&lock);
+		return -4;
+	}
+
+	rb = bpf_rbtree_remove(&root, rb);
+	bpf_spin_unlock(&lock);
+	if (!rb)
+		return -5;
+
+	n = container_of(rb, struct node_data, r);
+	l = bpf_list_del(&head, &n->l);
+	bpf_obj_drop(n);
+	if (!l)
+		return -6;
+
+	bpf_obj_drop(container_of(l, struct node_data, l));
+	return 0;
+}
+
+SEC("?tc")
+__failure __msg("bpf_spin_lock at off=32 must be held for bpf_list_head")
+long list_add_without_lock_fail(void *ctx)
+{
+	long err = 0;
+	struct bpf_rb_node *rb;
+	struct bpf_list_node *l, *l_1;
+	struct node_data *n, *n_1, *m_1;
+
+	err = __insert_in_tree_and_list(&head, &root, &lock);
+	if (err)
+		return err;
+
+	bpf_spin_lock(&lock);
+	rb = bpf_rbtree_first(&root);
+	if (!rb) {
+		bpf_spin_unlock(&lock);
+		return -4;
+	}
+
+	rb = bpf_rbtree_remove(&root, rb);
+	bpf_spin_unlock(&lock);
+	if (!rb)
+		return -5;
+
+	n = container_of(rb, struct node_data, r);
+	n_1 = bpf_obj_new(typeof(*n_1));
+	if (!n_1) {
+		bpf_obj_drop(n);
+		return -1;
+	}
+	m_1 = bpf_refcount_acquire(n_1);
+	if (!m_1) {
+		bpf_obj_drop(n);
+		bpf_obj_drop(n_1);
+		return -1;
+	}
+
+	/* Intentionally no lock: verifier should reject bpf_list_add without lock */
+	if (bpf_list_add(&head, &n_1->l, &n->l)) {
+		bpf_obj_drop(n);
+		bpf_obj_drop(m_1);
+		return -8;
+	}
+
+	bpf_spin_lock(&lock);
+	l = bpf_list_del(&head, &n->l);
+	l_1 = bpf_list_del(&head, &m_1->l);
+	bpf_spin_unlock(&lock);
+	bpf_obj_drop(n);
+	bpf_obj_drop(m_1);
+
+	if (l)
+		bpf_obj_drop(container_of(l, struct node_data, l));
+	else
+		err = -6;
+
+	if (l_1)
+		bpf_obj_drop(container_of(l_1, struct node_data, l));
+	else
+		err = -6;
+
+	return err;
+}
+
 SEC("tc")
 __success
 long rbtree_refcounted_node_ref_escapes(void *ctx)
-- 
2.50.1 (Apple Git-155)

