powerpc supports BPF atomic operations using a loop around
Load-And-Reserve (LDARX/LWARX) and Store-Conditional (STDCX/STWCX)
instructions gated by sync instructions to enforce full ordering.

To implement arena_atomics, arena vm start address is added to the
dst_reg to be used for both the LDARX/LWARX and STDCX/STWCX instructions.
Further, an exception table entry is added for LDARX/LWARX
instruction to land after the loop on fault. At the end of sequence,
dst_reg is restored by subtracting arena vm start address.

bpf_jit_supports_insn() is introduced to selectively enable instruction
support as in other architectures like x86 and arm64.

Signed-off-by: Saket Kumar Bhaskar <sk...@linux.ibm.com>
---
 arch/powerpc/net/bpf_jit_comp.c   | 16 ++++++++++++++++
 arch/powerpc/net/bpf_jit_comp64.c | 26 ++++++++++++++++++++++++++
 2 files changed, 42 insertions(+)

diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 2b3f90930c27..69232ee56c6a 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -452,6 +452,22 @@ bool bpf_jit_supports_far_kfunc_call(void)
        return IS_ENABLED(CONFIG_PPC64);
 }
 
+bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
+{
+       if (!in_arena)
+               return true;
+       switch (insn->code) {
+       case BPF_STX | BPF_ATOMIC | BPF_H:
+       case BPF_STX | BPF_ATOMIC | BPF_B:
+       case BPF_STX | BPF_ATOMIC | BPF_W:
+       case BPF_STX | BPF_ATOMIC | BPF_DW:
+               if (bpf_atomic_is_load_store(insn))
+                       return false;
+               return IS_ENABLED(CONFIG_PPC64);
+       }
+       return true;
+}
+
 void *arch_alloc_bpf_trampoline(unsigned int size)
 {
        return bpf_prog_pack_alloc(size, bpf_jit_fill_ill_insns);
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 6a85cd847075..8931bded97f4 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -1164,6 +1164,32 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
 
                        break;
 
+               /*
+                * BPF_STX PROBE_ATOMIC (arena atomic ops)
+                */
+               case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
+               case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
+                       EMIT(PPC_RAW_ADD(dst_reg, dst_reg, 
bpf_to_ppc(ARENA_VM_START)));
+                       ret = bpf_jit_emit_atomic_ops(image, ctx, &insn[i],
+                                                     &jmp_off, &tmp_idx, 
&addrs[i + 1]);
+                       if (ret) {
+                               if (ret == -EOPNOTSUPP) {
+                                       pr_err_ratelimited(
+                                               "eBPF filter atomic op code 
%02x (@%d) unsupported\n",
+                                               code, i);
+                               }
+                               return ret;
+                       }
+                       /* LDARX/LWARX should land here on exception. */
+                       ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
+                                                   tmp_idx, jmp_off, dst_reg, code);
+                       if (ret)
+                               return ret;
+
+                       /* Retrieve the dst_reg */
+                       EMIT(PPC_RAW_SUB(dst_reg, dst_reg, bpf_to_ppc(ARENA_VM_START)));
+                       break;
+
                /*
                 * BPF_STX ATOMIC (atomic ops)
                 */
-- 
2.43.5


Reply via email to