On Mon, May 5, 2025 at 11:39 AM Lorenz Bauer <[email protected]> wrote:
>
> User space needs access to kernel BTF for many modern features of BPF.
> Right now each process needs to read the BTF blob either in pieces or
> as a whole. Allow mmapping the sysfs file so that processes can directly
> access the memory allocated for it in the kernel.
>
> Signed-off-by: Lorenz Bauer <[email protected]>
> ---
> include/asm-generic/vmlinux.lds.h | 3 ++-
> kernel/bpf/sysfs_btf.c | 37 +++++++++++++++++++++++++++++++++++++
> 2 files changed, 39 insertions(+), 1 deletion(-)
>
> diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
> index 58a635a6d5bdf0c53c267c2a3d21a5ed8678ce73..1750390735fac7637cc4d2fa05f96cb2a36aa448 100644
> --- a/include/asm-generic/vmlinux.lds.h
> +++ b/include/asm-generic/vmlinux.lds.h
> @@ -667,10 +667,11 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
> */
> #ifdef CONFIG_DEBUG_INFO_BTF
> #define BTF \
> +	. = ALIGN(PAGE_SIZE); \
> 	.BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { \
> 		BOUNDED_SECTION_BY(.BTF, _BTF) \
> 	} \
> -	. = ALIGN(4); \
> +	. = ALIGN(PAGE_SIZE); \
> 	.BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) { \
> 		*(.BTF_ids) \
> 	}
> diff --git a/kernel/bpf/sysfs_btf.c b/kernel/bpf/sysfs_btf.c
> index 81d6cf90584a7157929c50f62a5c6862e7a3d081..37278d7f38ae72f2d7efcfa859e86aaf12e39a25 100644
> --- a/kernel/bpf/sysfs_btf.c
> +++ b/kernel/bpf/sysfs_btf.c
> @@ -7,14 +7,51 @@
> #include <linux/kobject.h>
> #include <linux/init.h>
> #include <linux/sysfs.h>
> +#include <linux/mm.h>
> +#include <linux/io.h>
> +#include <linux/btf.h>
>
> /* See scripts/link-vmlinux.sh, gen_btf() func for details */
> extern char __start_BTF[];
> extern char __stop_BTF[];
>
> +static int btf_sysfs_vmlinux_mmap(struct file *filp, struct kobject *kobj,
> +				  const struct bin_attribute *attr,
> +				  struct vm_area_struct *vma)
> +{
> +	unsigned long pages = PAGE_ALIGN(attr->size) >> PAGE_SHIFT;
> +	size_t vm_size = vma->vm_end - vma->vm_start;
> +	unsigned long addr = (unsigned long)attr->private;
> +	int i, err = 0;
> +
> +	if (addr != (unsigned long)__start_BTF || !PAGE_ALIGNED(addr))
> +		return -EINVAL;
> +
> +	if (vma->vm_pgoff)
> +		return -EINVAL;
any particular reason to not allow vm_pgoff?
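Not a blocker if it's just to keep things simple, but it looks like a
non-zero offset could be supported by tightening the size check and
offsetting the source page instead, something like (untested, just to
illustrate the idea):

	if (vma->vm_pgoff + (vm_size >> PAGE_SHIFT) > pages)
		return -EINVAL;

with the loop then iterating over the vma size rather than pages and
inserting virt_to_page(addr + (vma->vm_pgoff + i) * PAGE_SIZE).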
> +
> +	if (vma->vm_flags & (VM_WRITE | VM_EXEC | VM_MAYSHARE))
> +		return -EACCES;
> +
> +	if (vm_size >> PAGE_SHIFT > pages)
() around shift operation, please, for those of us who haven't
memorized the entire C operator precedence table ;)
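i.e.

	if ((vm_size >> PAGE_SHIFT) > pages)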
> +		return -EINVAL;
> +
> +	vm_flags_mod(vma, VM_DONTDUMP, VM_MAYEXEC | VM_MAYWRITE);
> +
> +	for (i = 0; i < pages && !err; i++, addr += PAGE_SIZE)
> +		err = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
> +				     virt_to_page(addr));
> +
> +	if (err)
> +		zap_vma_pages(vma);
It's certainly subjective, but I find this error handling with !err in
the for loop condition hard to follow. What's wrong with the arguably
more straightforward version below? (As you can see, I'm also not a big
fan of mixing a mutated addr with a calculated vma->vm_start + i * PAGE_SIZE:
pick one style and follow it for both.)

	for (i = 0; i < pages; i++) {
		err = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
				     virt_to_page(addr + i * PAGE_SIZE));
		if (err) {
			zap_vma_pages(vma);
			return err;
		}
	}

	return 0;
> +
> +	return err;
> +}
> +
> static struct bin_attribute bin_attr_btf_vmlinux __ro_after_init = {
> 	.attr = { .name = "vmlinux", .mode = 0444, },
> 	.read_new = sysfs_bin_attr_simple_read,
> +	.mmap = btf_sysfs_vmlinux_mmap,
> };
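Also, just to double-check my understanding of the intended use: the
consumer side of this would simply be a read-only private mapping of
/sys/kernel/btf/vmlinux, roughly like this (untested sketch)?

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <sys/stat.h>
	#include <unistd.h>

	int main(void)
	{
		struct stat st;
		void *btf;
		int fd;

		fd = open("/sys/kernel/btf/vmlinux", O_RDONLY);
		if (fd < 0 || fstat(fd, &st))
			return 1;

		/* PROT_READ + MAP_PRIVATE only; the kernel side rejects
		 * writable/shared mappings with -EACCES.
		 */
		btf = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
		if (btf == MAP_FAILED)
			return 1;

		printf("mapped %zu bytes of vmlinux BTF\n", (size_t)st.st_size);

		munmap(btf, st.st_size);
		close(fd);
		return 0;
	}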
>
> struct kobject *btf_kobj;
>
> --
> 2.49.0
>