Ideally, the offset used to load the tail call info field and the
offset used to find the pass-by-reference address of the tail call
field should be the same. However, this was not the case when setting
up the tail call info in the trampoline. This can be misleading and
can lead to unpredictable results if and when bpf_has_stack_frame()
ends up returning true for the trampoline frame. Since commit
15513beeb673 ("powerpc64/bpf: Moving tail_call_cnt to bottom of
frame") and commit 2ed2d8f6fb38 ("powerpc64/bpf: Support tailcalls
with subprogs") ensured that the tail call field is at the bottom of
the stack frame for BPF programs as well as the BPF trampoline, avoid
relying on bpf_jit_stack_tailcallinfo_offset() and
bpf_has_stack_frame() for the trampoline frame, and always calculate
the tail call field offset with reference to the previous frame.
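
For reference, a minimal sketch of the resulting logic (pseudo-code,
not the emitted instructions; func_frame_offset, bpf_dummy_frame_size,
BPF_PPC_TAILCALL and MAX_TAIL_CALL_CNT are as used in the powerpc64
JIT):

	/* r1 is the trampoline's stack pointer */
	prev_frame = *(r1 + func_frame_offset);
	info = *(prev_frame - BPF_PPC_TAILCALL);

	/* still a count? pass the field by reference instead */
	if (info <= MAX_TAIL_CALL_CNT)
		info = prev_frame - BPF_PPC_TAILCALL;

	/* same -BPF_PPC_TAILCALL offset, now from the trampoline frame */
	*(r1 + func_frame_offset - bpf_dummy_frame_size
	     - BPF_PPC_TAILCALL) = info;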

Fixes: 2ed2d8f6fb38 ("powerpc64/bpf: Support tailcalls with subprogs")
Signed-off-by: Hari Bathini <[email protected]>
---
 arch/powerpc/net/bpf_jit.h        |  5 -----
 arch/powerpc/net/bpf_jit_comp.c   | 10 ++++------
 arch/powerpc/net/bpf_jit_comp64.c |  5 ++++-
 3 files changed, 8 insertions(+), 12 deletions(-)

diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 82bbf63f0e57..7354e1d72f79 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -81,9 +81,6 @@
 
 #ifdef CONFIG_PPC64
 
-/* for gpr non volatile registers BPG_REG_6 to 10 */
-#define BPF_PPC_STACK_SAVE     (6 * 8)
-
 /* If dummy pass (!image), account for maximum possible instructions */
 #define PPC_LI64(d, i)         do {                                          \
        if (!image)                                                           \
@@ -219,8 +216,6 @@ int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg,
 int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, u32 *fimage, int pass,
                          struct codegen_context *ctx, int insn_idx,
                          int jmp_off, int dst_reg, u32 code);
-
-int bpf_jit_stack_tailcallinfo_offset(struct codegen_context *ctx);
 #endif
 
 #endif
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index fb6cc1f832a8..1ff8030faf1f 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -642,15 +642,13 @@ static void bpf_trampoline_setup_tail_call_info(u32 *image, struct codegen_conte
                                                int bpf_dummy_frame_size, int r4_off)
 {
        if (IS_ENABLED(CONFIG_PPC64)) {
-               /* See Generated stack layout */
-               int tailcallinfo_offset = BPF_PPC_TAILCALL;
-
                /*
                 * func_frame_offset =                                   ...(1)
                 *      bpf_dummy_frame_size + trampoline_frame_size
                 */
                EMIT(PPC_RAW_LD(_R4, _R1, func_frame_offset));
-               EMIT(PPC_RAW_LD(_R3, _R4, -tailcallinfo_offset));
+               /* Refer to trampoline's Generated stack layout */
+               EMIT(PPC_RAW_LD(_R3, _R4, -BPF_PPC_TAILCALL));
 
                /*
                 * Setting the tail_call_info in trampoline's frame
@@ -658,7 +656,7 @@ static void bpf_trampoline_setup_tail_call_info(u32 *image, struct codegen_conte
                 */
                EMIT(PPC_RAW_CMPLWI(_R3, MAX_TAIL_CALL_CNT));
                PPC_BCC_CONST_SHORT(COND_GT, 8);
-               EMIT(PPC_RAW_ADDI(_R3, _R4, bpf_jit_stack_tailcallinfo_offset(ctx)));
+               EMIT(PPC_RAW_ADDI(_R3, _R4, -BPF_PPC_TAILCALL));
                /*
                 * From ...(1) above:
                 * trampoline_frame_bottom =                            ...(2)
@@ -673,7 +671,7 @@ static void bpf_trampoline_setup_tail_call_info(u32 *image, struct codegen_conte
                 * tail_call_info in trampoline frame.
                 */
                EMIT(PPC_RAW_STL(_R3, _R1, (func_frame_offset - bpf_dummy_frame_size)
-                                                               - tailcallinfo_offset));
+                                                               - BPF_PPC_TAILCALL));
        } else {
                /* See bpf_jit_stack_offsetof() and BPF_PPC_TC */
                EMIT(PPC_RAW_LL(_R4, _R1, r4_off));
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 44ce8a8783f9..5d4d2bb23cef 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -42,6 +42,9 @@
  * exception boundary.
  */
 
+/* BPF non-volatile registers save area size */
+#define BPF_PPC_STACK_SAVE     (6 * 8)
+
 /* for bpf JIT code internal usage */
 #define BPF_PPC_STACK_LOCALS   24
 /*
@@ -148,7 +151,7 @@ static int bpf_jit_stack_local(struct codegen_context *ctx)
        }
 }
 
-int bpf_jit_stack_tailcallinfo_offset(struct codegen_context *ctx)
+static int bpf_jit_stack_tailcallinfo_offset(struct codegen_context *ctx)
 {
        return bpf_jit_stack_local(ctx) + BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE;
 }
-- 
2.53.0

