For now, the offset of the return value on the trampoline stack is a
fixed 8 bytes. Introduce the variable "ret_off" to represent this
offset. For now, "ret_off" is simply 8; a following patch will change
it to make use of the room after the return value slot.
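
As an illustration of the bookkeeping, below is a minimal standalone
sketch (not part of this patch; "struct tramp_offsets" and
"compute_offsets()" are hypothetical names) of how the slot offsets are
derived in __arch_prepare_bpf_trampoline(). Each *_off records the
distance below RBP, so a slot is addressed as [RBP - off]:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical container for the offsets this patch computes. */
struct tramp_offsets {
        int ret_off;    /* return value slot, valid when save_ret */
        int regs_off;   /* saved register arguments */
        int stack_size; /* running total of the frame size */
};

static void compute_offsets(struct tramp_offsets *o, int nr_regs,
                            bool save_ret)
{
        o->stack_size = 0;
        if (save_ret)
                o->stack_size += 8;
        /* ret_off is taken as the running stack_size right after the
         * 8-byte slot is reserved, so it is 8 for now.
         */
        o->ret_off = o->stack_size;

        o->stack_size += nr_regs * 8;
        o->regs_off = o->stack_size;
}

int main(void)
{
        struct tramp_offsets o;

        compute_offsets(&o, 2, true);
        /* prints "ret_off=8 regs_off=24" */
        printf("ret_off=%d regs_off=%d\n", o.ret_off, o.regs_off);
        return 0;
}

Note that the "cmp QWORD PTR [rbp - ret_off], 0" emitted in
invoke_bpf_mod_ret() encodes the offset as a signed 8-bit displacement,
so this scheme assumes ret_off keeps fitting in one byte.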

Signed-off-by: Menglong Dong <[email protected]>
---
 arch/x86/net/bpf_jit_comp.c | 41 +++++++++++++++++++++----------------
 1 file changed, 23 insertions(+), 18 deletions(-)

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 389c3a96e2b8..7a604ee9713f 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -2940,7 +2940,7 @@ static void restore_regs(const struct btf_func_model *m, u8 **prog,
 
 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
                           struct bpf_tramp_link *l, int stack_size,
-                          int run_ctx_off, bool save_ret,
+                          int run_ctx_off, bool save_ret, int ret_off,
                           void *image, void *rw_image)
 {
        u8 *prog = *pprog;
@@ -3005,7 +3005,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
         * value of BPF_PROG_TYPE_STRUCT_OPS prog.
         */
        if (save_ret)
-               emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
+               emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ret_off);
 
        /* replace 2 nops with JE insn, since jmp target is known */
        jmp_insn[0] = X86_JE;
@@ -3055,7 +3055,7 @@ static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
 
 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
                      struct bpf_tramp_links *tl, int stack_size,
-                     int run_ctx_off, bool save_ret,
+                     int run_ctx_off, bool save_ret, int ret_off,
                      void *image, void *rw_image)
 {
        int i;
@@ -3063,7 +3063,8 @@ static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
 
        for (i = 0; i < tl->nr_links; i++) {
                if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
-                                   run_ctx_off, save_ret, image, rw_image))
+                                   run_ctx_off, save_ret, ret_off, image,
+                                   rw_image))
                        return -EINVAL;
        }
        *pprog = prog;
@@ -3072,7 +3073,7 @@ static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
 
 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
                              struct bpf_tramp_links *tl, int stack_size,
-                             int run_ctx_off, u8 **branches,
+                             int run_ctx_off, int ret_off, u8 **branches,
                              void *image, void *rw_image)
 {
        u8 *prog = *pprog;
@@ -3082,18 +3083,18 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
         * Set this to 0 to avoid confusing the program.
         */
        emit_mov_imm32(&prog, false, BPF_REG_0, 0);
-       emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
+       emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ret_off);
        for (i = 0; i < tl->nr_links; i++) {
                if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true,
-                                   image, rw_image))
+                                   ret_off, image, rw_image))
                        return -EINVAL;
 
-               /* mod_ret prog stored return value into [rbp - 8]. Emit:
-                * if (*(u64 *)(rbp - 8) !=  0)
+               /* mod_ret prog stored return value into [rbp - ret_off]. Emit:
+                * if (*(u64 *)(rbp - ret_off) !=  0)
                 *      goto do_fexit;
                 */
-               /* cmp QWORD PTR [rbp - 0x8], 0x0 */
-               EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
+               /* cmp QWORD PTR [rbp - ret_off], 0x0 */
+               EMIT4(0x48, 0x83, 0x7d, -ret_off); EMIT1(0x00);
 
                /* Save the location of the branch and Generate 6 nops
                 * (4 bytes for an offset and 2 bytes for the jump) These nops
@@ -3179,7 +3180,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
                                         void *func_addr)
 {
        int i, ret, nr_regs = m->nr_args, stack_size = 0;
-       int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off;
+       int ret_off, regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off,
+           rbx_off;
        struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
        struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
        struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
@@ -3213,7 +3215,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
         * RBP + 8         [ return address  ]
         * RBP + 0         [ RBP             ]
         *
-        * RBP - 8         [ return value    ]  BPF_TRAMP_F_CALL_ORIG or
+        * RBP - ret_off   [ return value    ]  BPF_TRAMP_F_CALL_ORIG or
         *                                      BPF_TRAMP_F_RET_FENTRY_RET flags
         *
         *                 [ reg_argN        ]  always
@@ -3239,6 +3241,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
        save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
        if (save_ret)
                stack_size += 8;
+       ret_off = stack_size;
 
        stack_size += nr_regs * 8;
        regs_off = stack_size;
@@ -3341,7 +3344,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 
        if (fentry->nr_links) {
                if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
-                              flags & BPF_TRAMP_F_RET_FENTRY_RET, image, rw_image))
+                              flags & BPF_TRAMP_F_RET_FENTRY_RET, ret_off,
+                              image, rw_image))
                        return -EINVAL;
        }
 
@@ -3352,7 +3356,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
                        return -ENOMEM;
 
                if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
-                                      run_ctx_off, branches, image, rw_image)) {
+                                      run_ctx_off, ret_off, branches,
+                                      image, rw_image)) {
                        ret = -EINVAL;
                        goto cleanup;
                }
@@ -3380,7 +3385,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
                        }
                }
                /* remember return value in a stack for bpf prog to access */
-               emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
+               emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ret_off);
                im->ip_after_call = image + (prog - (u8 *)rw_image);
                emit_nops(&prog, X86_PATCH_SIZE);
        }
@@ -3403,7 +3408,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 
        if (fexit->nr_links) {
                if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off,
-                              false, image, rw_image)) {
+                              false, ret_off, image, rw_image)) {
                        ret = -EINVAL;
                        goto cleanup;
                }
@@ -3433,7 +3438,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 
        /* restore return value of orig_call or fentry prog back into RAX */
        if (save_ret)
-               emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
+               emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -ret_off);
 
        emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off);
        EMIT1(0xC9); /* leave */
-- 
2.51.1

