Enhance bpf_arch_text_poke() so that the entry point of a BPF program
can be located accurately before patching.

When poking the entry point of a BPF program, skip the leading
"move t0, ra" instruction (which saves the return address) so that the
jump sequence is read from, and written to, the correct address.

Signed-off-by: Chenghao Duan <[email protected]>
---
 arch/loongarch/net/bpf_jit.c | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index 3dbabacc8856..0c16a1b18e8f 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -1290,6 +1290,10 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type 
old_t,
                       void *new_addr)
 {
        int ret;
+       unsigned long size = 0;
+       unsigned long offset = 0;
+       char namebuf[KSYM_NAME_LEN];
+       void *image = NULL;
        bool is_call;
        u32 old_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
        u32 new_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
@@ -1297,9 +1301,18 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type 
old_t,
        /* Only poking bpf text is supported. Since kernel function entry
         * is set up by ftrace, we rely on ftrace to poke kernel functions.
         */
-       if (!is_bpf_text_address((unsigned long)ip))
+       if (!__bpf_address_lookup((unsigned long)ip, &size, &offset, namebuf))
                return -ENOTSUPP;
 
+       image = ip - offset;
+       /* zero offset means we're poking bpf prog entry */
+       if (offset == 0)
+               /* skip to the nop instruction in bpf prog entry:
+                * move t0, ra
+                * nop
+                */
+               ip = image + LOONGARCH_INSN_SIZE;
+
        is_call = old_t == BPF_MOD_CALL;
        ret = emit_jump_or_nops(old_addr, ip, old_insns, is_call);
        if (ret)
-- 
2.25.1


Reply via email to