Christophe Leroy wrote:


On 06/01/2022 at 12:45, Naveen N. Rao wrote:
These instructions are updated after the initial JIT, so redo codegen
during the extra pass. Rename bpf_jit_fixup_subprog_calls() to clarify
that this is more than just subprog calls.

Fixes: 69c087ba6225b5 ("bpf: Add bpf_for_each_map_elem() helper")
Cc: sta...@vger.kernel.org # v5.15
Signed-off-by: Naveen N. Rao <naveen.n....@linux.vnet.ibm.com>
---
  arch/powerpc/net/bpf_jit_comp.c   | 29 +++++++++++++++++++++++------
  arch/powerpc/net/bpf_jit_comp32.c |  6 ++++++
  arch/powerpc/net/bpf_jit_comp64.c |  7 ++++++-
  3 files changed, 35 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index d6ffdd0f2309d0..56dd1f4e3e4447 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -23,15 +23,15 @@ static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
        memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
  }
 
-/* Fix the branch target addresses for subprog calls */
-static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image,
-                                      struct codegen_context *ctx, u32 *addrs)
+/* Fix updated addresses (for subprog calls, ldimm64, et al) during extra pass */
+static int bpf_jit_fixup_addresses(struct bpf_prog *fp, u32 *image,
+                                  struct codegen_context *ctx, u32 *addrs)
  {
        const struct bpf_insn *insn = fp->insnsi;
        bool func_addr_fixed;
        u64 func_addr;
        u32 tmp_idx;
-       int i, ret;
+       int i, j, ret;
 
        for (i = 0; i < fp->len; i++) {
                /*
@@ -66,6 +66,23 @@ static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image,
                         * of the JITed sequence remains unchanged.
                         */
                        ctx->idx = tmp_idx;
+               } else if (insn[i].code == (BPF_LD | BPF_IMM | BPF_DW)) {
+                       tmp_idx = ctx->idx;
+                       ctx->idx = addrs[i] / 4;
+#ifdef CONFIG_PPC32
+                       PPC_LI32(ctx->b2p[insn[i].dst_reg] - 1, (u32)insn[i + 1].imm);
+                       PPC_LI32(ctx->b2p[insn[i].dst_reg], (u32)insn[i].imm);
+                       for (j = ctx->idx - addrs[i] / 4; j < 4; j++)
+                               EMIT(PPC_RAW_NOP());
+#else
+                       func_addr = ((u64)(u32)insn[i].imm) | (((u64)(u32)insn[i + 1].imm) << 32);
+                       PPC_LI64(b2p[insn[i].dst_reg], func_addr);
+                       /* overwrite rest with nops */
+                       for (j = ctx->idx - addrs[i] / 4; j < 5; j++)
+                               EMIT(PPC_RAW_NOP());
+#endif

#ifdefs should be avoided as much as possible.

Here it seems we could easily do an

        if (IS_ENABLED(CONFIG_PPC32)) {
        } else {
        }

And it looks like the CONFIG_PPC64 alternative would in fact also work on PPC32, wouldn't it?
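
For instance, transposing the two quoted branches under IS_ENABLED() might look like this (an untested sketch of the suggestion, reusing only the names from the hunk above):

		} else if (insn[i].code == (BPF_LD | BPF_IMM | BPF_DW)) {
			tmp_idx = ctx->idx;
			ctx->idx = addrs[i] / 4;
			if (IS_ENABLED(CONFIG_PPC32)) {
				PPC_LI32(ctx->b2p[insn[i].dst_reg] - 1, (u32)insn[i + 1].imm);
				PPC_LI32(ctx->b2p[insn[i].dst_reg], (u32)insn[i].imm);
				/* pad the fixed 4-instruction slot with nops */
				for (j = ctx->idx - addrs[i] / 4; j < 4; j++)
					EMIT(PPC_RAW_NOP());
			} else {
				func_addr = ((u64)(u32)insn[i].imm) | (((u64)(u32)insn[i + 1].imm) << 32);
				PPC_LI64(b2p[insn[i].dst_reg], func_addr);
				/* overwrite rest of the 5-instruction slot with nops */
				for (j = ctx->idx - addrs[i] / 4; j < 5; j++)
					EMIT(PPC_RAW_NOP());
			}
		}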

We never implemented PPC_LI64() for ppc32:
 /linux/arch/powerpc/net/bpf_jit_comp.c: In function 'bpf_jit_fixup_addresses':
 /linux/arch/powerpc/net/bpf_jit_comp.c:81:5: error: this decimal constant is unsigned only in ISO C90 [-Werror]
    81 |     PPC_LI64(b2p[insn[i].dst_reg], func_addr);
       |     ^~~~~~~~
 /linux/arch/powerpc/net/bpf_jit_comp.c:81:5: error: this decimal constant is unsigned only in ISO C90 [-Werror]
 In file included from /linux/arch/powerpc/net/bpf_jit_comp.c:19:
 /linux/arch/powerpc/net/bpf_jit.h:78:40: error: right shift count >= width of type [-Werror=shift-count-overflow]
    78 |     EMIT(PPC_RAW_LI(d, ((uintptr_t)(i) >> 32) &   \
       |                                        ^~


We should move that out of bpf_jit.h.
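
That is, PPC_LI64() could live in bpf_jit_comp64.c, which is only built for 64-bit, so a ppc32 build never parses the >> 32 shifts and the decimal constants the errors above point at. A simplified sketch of the idea (illustrative only, not the kernel's exact macro):

	/*
	 * Illustrative only -- not the exact kernel macro. Builds an
	 * arbitrary 64-bit immediate in register 'd' with a fixed
	 * five-instruction sequence, matching the 5-slot nop padding
	 * in the hunk above. Everything here is 64-bit-only, so it is
	 * fine in bpf_jit_comp64.c but not in shared code.
	 */
	#define PPC_LI64(d, i)	do {						\
		EMIT(PPC_RAW_LIS(d, ((uintptr_t)(i) >> 48) & 0xffff));		\
		EMIT(PPC_RAW_ORI(d, d, ((uintptr_t)(i) >> 32) & 0xffff));	\
		EMIT(PPC_RAW_SLDI(d, d, 32));					\
		EMIT(PPC_RAW_ORIS(d, d, ((uintptr_t)(i) >> 16) & 0xffff));	\
		EMIT(PPC_RAW_ORI(d, d, (uintptr_t)(i) & 0xffff));		\
	} while (0)

The real macro emits fewer instructions for small immediates, which is why the extra pass has to pad the remainder of the slot with nops. Note that the shared fixup in bpf_jit_comp.c would then still need the #ifdef (or a per-arch helper), since PPC_LI64() would no longer be visible there.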


- Naveen
