arena_vm_open() only increments a refcount on the existing vma_list
entry without creating a new entry for the child's VMA. After fork,
vml->vma still points to the parent's VMA. When the parent unmaps
(arena_vm_close decrements refcount but doesn't remove the entry),
vml->vma becomes a dangling pointer. A subsequent bpf_arena_free_pages
call reaches zap_pages(), which dereferences the freed VMA via
zap_vma_range(vml->vma, ...), causing a use-after-free:
BUG: KASAN: slab-use-after-free in zap_vma_range+0xf2/0x100
Read of size 8 at addr ff11000113ec9b10 by task test_progs/198
Call Trace:
zap_vma_range+0xf2/0x100
arena_free_pages+0x6de/0x970
bpf_prog_a2b540a82b1066f3_arena_free+0x8b/0xb6
bpf_prog_test_run_syscall+0x3d3/0x8a0
The same issue is triggered by __split_vma (partial munmap) and
copy_vma (mremap), both of which call vm_ops->open.
Fix this by giving each VMA its own vma_list entry instead of sharing
one with a refcount. arena_vm_open now allocates a new entry for the
new VMA, and arena_vm_close always removes and frees its own entry.
If the allocation fails in arena_vm_open, vm_private_data is set to
NULL and arena_vm_close returns early when it sees the NULL, so the
VMA simply won't be zapped during arena page frees.
Fixes: 317460317a02 ("bpf: Introduce bpf_arena.")
Signed-off-by: Daniel Hodges <[email protected]>
Assisted-by: Claude-Code:claude-opus-4-6
---
kernel/bpf/arena.c | 19 +++++++++++++------
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c
index 49a8f7b1beef..a3c46100dd12 100644
--- a/kernel/bpf/arena.c
+++ b/kernel/bpf/arena.c
@@ -317,7 +317,6 @@ static u64 arena_map_mem_usage(const struct bpf_map *map)
struct vma_list {
struct vm_area_struct *vma;
struct list_head head;
- refcount_t mmap_count;
};
static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
@@ -327,7 +326,6 @@ static int remember_vma(struct bpf_arena *arena, struct
vm_area_struct *vma)
vml = kmalloc_obj(*vml);
if (!vml)
return -ENOMEM;
- refcount_set(&vml->mmap_count, 1);
vma->vm_private_data = vml;
vml->vma = vma;
list_add(&vml->head, &arena->vma_list);
@@ -336,9 +334,19 @@ static int remember_vma(struct bpf_arena *arena, struct
vm_area_struct *vma)
static void arena_vm_open(struct vm_area_struct *vma)
{
- struct vma_list *vml = vma->vm_private_data;
+ struct bpf_map *map = vma->vm_file->private_data;
+ struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
+ struct vma_list *vml;
- refcount_inc(&vml->mmap_count);
+ vml = kmalloc_obj(*vml);
+ if (!vml) {
+ vma->vm_private_data = NULL;
+ return;
+ }
+ vml->vma = vma;
+ vma->vm_private_data = vml;
+ guard(mutex)(&arena->lock);
+ list_add(&vml->head, &arena->vma_list);
}
static int arena_vm_may_split(struct vm_area_struct *vma, unsigned long addr)
@@ -357,10 +365,9 @@ static void arena_vm_close(struct vm_area_struct *vma)
struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
struct vma_list *vml = vma->vm_private_data;
- if (!refcount_dec_and_test(&vml->mmap_count))
+ if (!vml)
return;
guard(mutex)(&arena->lock);
- /* update link list under lock */
list_del(&vml->head);
vma->vm_private_data = NULL;
kfree(vml);
--
2.52.0