On Fri, 2025-07-18 at 16:25 +0100, Leo Yan wrote:

[...]

> +__bpf_kfunc int bpf_perf_event_aux_pause(void *p__map, u64 flags, u32 pause)
> +{
> +     struct bpf_map *map = p__map;
> +     struct bpf_array *array = container_of(map, struct bpf_array, map);

The verifier makes sure that p__map is a non-NULL pointer to an object of
type bpf_map, but it does not guarantee that the object is an instance
of bpf_array.
You need to check map->type, the same way bpf_arena_alloc_pages() does.

> +     unsigned int cpu = smp_processor_id();
> +     u64 index = flags & BPF_F_INDEX_MASK;
> +     struct bpf_event_entry *ee;
> +     int ret = 0;
> +
> +     /* Disabling IRQ avoids race condition with perf event flows. */
> +     guard(irqsave)();
> +
> +     if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
> +             ret = -EINVAL;
> +             goto out;
> +     }
> +
> +     if (index == BPF_F_CURRENT_CPU)
> +             index = cpu;
> +
> +     if (unlikely(index >= array->map.max_entries)) {
> +             ret = -E2BIG;
> +             goto out;
> +     }
> +
> +     ee = READ_ONCE(array->ptrs[index]);
> +     if (!ee) {
> +             ret = -ENOENT;
> +             goto out;
> +     }
> +
> +     if (!has_aux(ee->event))
> +             ret = -EINVAL;
> +
> +     perf_event_aux_pause(ee->event, pause);
> +out:
> +     return ret;
> +}

[...]

Reply via email to