Add instructions for ppc32 for:

atomic_and
atomic_or
atomic_xor
atomic_fetch_add
atomic_fetch_and
atomic_fetch_or
atomic_fetch_xor

Signed-off-by: Hari Bathini <hbath...@linux.ibm.com>
---
 arch/powerpc/net/bpf_jit_comp32.c | 45 +++++++++++++++++++++----------
 1 file changed, 31 insertions(+), 14 deletions(-)

diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index e46ed1e8c6ca..5604ae1b60ab 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -798,25 +798,42 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
                 * BPF_STX ATOMIC (atomic ops)
                 */
                case BPF_STX | BPF_ATOMIC | BPF_W:
-                       if (imm != BPF_ADD) {
-                               pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n",
-                                                  code, i);
-                               return -ENOTSUPP;
-                       }
-
-                       /* *(u32 *)(dst + off) += src */
-
                        bpf_set_seen_register(ctx, tmp_reg);
                        /* Get offset into TMP_REG */
                        EMIT(PPC_RAW_LI(tmp_reg, off));
+                       tmp_idx = ctx->idx * 4;
                        /* load value from memory into r0 */
                        EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));
-                       /* add value from src_reg into this */
-                       EMIT(PPC_RAW_ADD(_R0, _R0, src_reg));
-                       /* store result back */
-                       EMIT(PPC_RAW_STWCX(_R0, tmp_reg, dst_reg));
-                       /* we're done if this succeeded */
-                       PPC_BCC_SHORT(COND_NE, (ctx->idx - 3) * 4);
+                       switch (imm) {
+                       case BPF_ADD:
+                       case BPF_ADD | BPF_FETCH:
+                               EMIT(PPC_RAW_ADD(_R0, _R0, src_reg));
+                               goto atomic_ops;
+                       case BPF_AND:
+                       case BPF_AND | BPF_FETCH:
+                               EMIT(PPC_RAW_AND(_R0, _R0, src_reg));
+                               goto atomic_ops;
+                       case BPF_OR:
+                       case BPF_OR | BPF_FETCH:
+                               EMIT(PPC_RAW_OR(_R0, _R0, src_reg));
+                               goto atomic_ops;
+                       case BPF_XOR:
+                       case BPF_XOR | BPF_FETCH:
+                               EMIT(PPC_RAW_XOR(_R0, _R0, src_reg));
+atomic_ops:
+                               /* For the BPF_FETCH variant, get old data into src_reg */
+                               if (imm & BPF_FETCH)
+                                       EMIT(PPC_RAW_LWARX(src_reg, tmp_reg, dst_reg, 0));
+                               /* store result back */
+                               EMIT(PPC_RAW_STWCX(_R0, tmp_reg, dst_reg));
+                               /* we're done if this succeeded */
+                               PPC_BCC_SHORT(COND_NE, tmp_idx);
+                               break;
+                       default:
+                               pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n",
+                                                  code, i);
+                               return -EOPNOTSUPP;
+                       }
                        break;
 
                case BPF_STX | BPF_ATOMIC | BPF_DW: /* *(u64 *)(dst + off) += src */
-- 
2.35.1

Reply via email to