Move instruction macros (like BPF_MOV64_REG or BPF_ALU32_IMM) from
linux/filter.h into uapi/linux/bpf.h so that userspace programs can use them.

The verifier testsuite (added in later patches) will use them.

Signed-off-by: Alexei Starovoitov <a...@plumgrid.com>
---
 include/linux/filter.h   |  226 ----------------------------------------------
 include/uapi/linux/bpf.h |  226 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 226 insertions(+), 226 deletions(-)
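
Usage sketch (note for reviewers, not part of the commit): with the macros
exported via uapi/linux/bpf.h, a userspace test can build instruction arrays
directly. A minimal example, assuming the BPF_REG_* enum from the same header
and an existing wrapper around the bpf(BPF_PROG_LOAD, ...) syscall elsewhere:

  #include <linux/bpf.h>

  /* trivial program: return 1 */
  struct bpf_insn prog[] = {
          BPF_MOV64_IMM(BPF_REG_0, 1),    /* r0 = 1    */
          BPF_EXIT_INSN(),                /* return r0 */
  };

  /* prog[] and sizeof(prog) / sizeof(prog[0]) would then be passed to the
   * BPF_PROG_LOAD wrapper; the verifier accepts this since r0 is set
   * before exit.
   */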

diff --git a/include/linux/filter.h b/include/linux/filter.h
index ff77842af3e1..c220b27b10df 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -37,232 +37,6 @@ struct bpf_prog_info;
 /* BPF program can access up to 512 bytes of stack space. */
 #define MAX_BPF_STACK  512
 
-/* Helper macros for filter block array initializers. */
-
-/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
-
-#define BPF_ALU64_REG(OP, DST, SRC)                            \
-       ((struct bpf_insn) {                                    \
-               .code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,        \
-               .dst_reg = DST,                                 \
-               .src_reg = SRC,                                 \
-               .off   = 0,                                     \
-               .imm   = 0 })
-
-#define BPF_ALU32_REG(OP, DST, SRC)                            \
-       ((struct bpf_insn) {                                    \
-               .code  = BPF_ALU | BPF_OP(OP) | BPF_X,          \
-               .dst_reg = DST,                                 \
-               .src_reg = SRC,                                 \
-               .off   = 0,                                     \
-               .imm   = 0 })
-
-/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
-
-#define BPF_ALU64_IMM(OP, DST, IMM)                            \
-       ((struct bpf_insn) {                                    \
-               .code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,        \
-               .dst_reg = DST,                                 \
-               .src_reg = 0,                                   \
-               .off   = 0,                                     \
-               .imm   = IMM })
-
-#define BPF_ALU32_IMM(OP, DST, IMM)                            \
-       ((struct bpf_insn) {                                    \
-               .code  = BPF_ALU | BPF_OP(OP) | BPF_K,          \
-               .dst_reg = DST,                                 \
-               .src_reg = 0,                                   \
-               .off   = 0,                                     \
-               .imm   = IMM })
-
-/* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
-
-#define BPF_ENDIAN(TYPE, DST, LEN)                             \
-       ((struct bpf_insn) {                                    \
-               .code  = BPF_ALU | BPF_END | BPF_SRC(TYPE),     \
-               .dst_reg = DST,                                 \
-               .src_reg = 0,                                   \
-               .off   = 0,                                     \
-               .imm   = LEN })
-
-/* Short form of mov, dst_reg = src_reg */
-
-#define BPF_MOV64_REG(DST, SRC)                                        \
-       ((struct bpf_insn) {                                    \
-               .code  = BPF_ALU64 | BPF_MOV | BPF_X,           \
-               .dst_reg = DST,                                 \
-               .src_reg = SRC,                                 \
-               .off   = 0,                                     \
-               .imm   = 0 })
-
-#define BPF_MOV32_REG(DST, SRC)                                        \
-       ((struct bpf_insn) {                                    \
-               .code  = BPF_ALU | BPF_MOV | BPF_X,             \
-               .dst_reg = DST,                                 \
-               .src_reg = SRC,                                 \
-               .off   = 0,                                     \
-               .imm   = 0 })
-
-/* Short form of mov, dst_reg = imm32 */
-
-#define BPF_MOV64_IMM(DST, IMM)                                        \
-       ((struct bpf_insn) {                                    \
-               .code  = BPF_ALU64 | BPF_MOV | BPF_K,           \
-               .dst_reg = DST,                                 \
-               .src_reg = 0,                                   \
-               .off   = 0,                                     \
-               .imm   = IMM })
-
-#define BPF_MOV32_IMM(DST, IMM)                                        \
-       ((struct bpf_insn) {                                    \
-               .code  = BPF_ALU | BPF_MOV | BPF_K,             \
-               .dst_reg = DST,                                 \
-               .src_reg = 0,                                   \
-               .off   = 0,                                     \
-               .imm   = IMM })
-
-/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
-#define BPF_LD_IMM64(DST, IMM)                                 \
-       BPF_LD_IMM64_RAW(DST, 0, IMM)
-
-#define BPF_LD_IMM64_RAW(DST, SRC, IMM)                                \
-       ((struct bpf_insn) {                                    \
-               .code  = BPF_LD | BPF_DW | BPF_IMM,             \
-               .dst_reg = DST,                                 \
-               .src_reg = SRC,                                 \
-               .off   = 0,                                     \
-               .imm   = (__u32) (IMM) }),                      \
-       ((struct bpf_insn) {                                    \
-               .code  = 0, /* zero is reserved opcode */       \
-               .dst_reg = 0,                                   \
-               .src_reg = 0,                                   \
-               .off   = 0,                                     \
-               .imm   = ((__u64) (IMM)) >> 32 })
-
-#define BPF_PSEUDO_MAP_FD      1
-
-/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
-#define BPF_LD_MAP_FD(DST, MAP_FD)                             \
-       BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
-
-/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */
-
-#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)                     \
-       ((struct bpf_insn) {                                    \
-               .code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),   \
-               .dst_reg = DST,                                 \
-               .src_reg = SRC,                                 \
-               .off   = 0,                                     \
-               .imm   = IMM })
-
-#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)                     \
-       ((struct bpf_insn) {                                    \
-               .code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),     \
-               .dst_reg = DST,                                 \
-               .src_reg = SRC,                                 \
-               .off   = 0,                                     \
-               .imm   = IMM })
-
-/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */
-
-#define BPF_LD_ABS(SIZE, IMM)                                  \
-       ((struct bpf_insn) {                                    \
-               .code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,     \
-               .dst_reg = 0,                                   \
-               .src_reg = 0,                                   \
-               .off   = 0,                                     \
-               .imm   = IMM })
-
-/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */
-
-#define BPF_LD_IND(SIZE, SRC, IMM)                             \
-       ((struct bpf_insn) {                                    \
-               .code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,     \
-               .dst_reg = 0,                                   \
-               .src_reg = SRC,                                 \
-               .off   = 0,                                     \
-               .imm   = IMM })
-
-/* Memory load, dst_reg = *(uint *) (src_reg + off16) */
-
-#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)                       \
-       ((struct bpf_insn) {                                    \
-               .code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,    \
-               .dst_reg = DST,                                 \
-               .src_reg = SRC,                                 \
-               .off   = OFF,                                   \
-               .imm   = 0 })
-
-/* Memory store, *(uint *) (dst_reg + off16) = src_reg */
-
-#define BPF_STX_MEM(SIZE, DST, SRC, OFF)                       \
-       ((struct bpf_insn) {                                    \
-               .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,    \
-               .dst_reg = DST,                                 \
-               .src_reg = SRC,                                 \
-               .off   = OFF,                                   \
-               .imm   = 0 })
-
-/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
-
-#define BPF_ST_MEM(SIZE, DST, OFF, IMM)                                \
-       ((struct bpf_insn) {                                    \
-               .code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,     \
-               .dst_reg = DST,                                 \
-               .src_reg = 0,                                   \
-               .off   = OFF,                                   \
-               .imm   = IMM })
-
-/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */
-
-#define BPF_JMP_REG(OP, DST, SRC, OFF)                         \
-       ((struct bpf_insn) {                                    \
-               .code  = BPF_JMP | BPF_OP(OP) | BPF_X,          \
-               .dst_reg = DST,                                 \
-               .src_reg = SRC,                                 \
-               .off   = OFF,                                   \
-               .imm   = 0 })
-
-/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
-
-#define BPF_JMP_IMM(OP, DST, IMM, OFF)                         \
-       ((struct bpf_insn) {                                    \
-               .code  = BPF_JMP | BPF_OP(OP) | BPF_K,          \
-               .dst_reg = DST,                                 \
-               .src_reg = 0,                                   \
-               .off   = OFF,                                   \
-               .imm   = IMM })
-
-/* Function call */
-
-#define BPF_EMIT_CALL(FUNC)                                    \
-       ((struct bpf_insn) {                                    \
-               .code  = BPF_JMP | BPF_CALL,                    \
-               .dst_reg = 0,                                   \
-               .src_reg = 0,                                   \
-               .off   = 0,                                     \
-               .imm   = ((FUNC) - __bpf_call_base) })
-
-/* Raw code statement block */
-
-#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)                 \
-       ((struct bpf_insn) {                                    \
-               .code  = CODE,                                  \
-               .dst_reg = DST,                                 \
-               .src_reg = SRC,                                 \
-               .off   = OFF,                                   \
-               .imm   = IMM })
-
-/* Program exit */
-
-#define BPF_EXIT_INSN()                                                \
-       ((struct bpf_insn) {                                    \
-               .code  = BPF_JMP | BPF_EXIT,                    \
-               .dst_reg = 0,                                   \
-               .src_reg = 0,                                   \
-               .off   = 0,                                     \
-               .imm   = 0 })
-
 #define bytes_to_bpf_size(bytes)                               \
 ({                                                             \
        int bpf_size = -EINVAL;                                 \
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 13bd4bf3b100..97edd23622fc 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -54,6 +54,232 @@ enum {
 /* BPF has 10 general purpose 64-bit registers and stack frame. */
 #define MAX_BPF_REG    __MAX_BPF_REG
 
+/* Helper macros for filter block array initializers. */
+
+/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
+
+#define BPF_ALU64_REG(OP, DST, SRC)                            \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,        \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = 0,                                     \
+               .imm   = 0 })
+
+#define BPF_ALU32_REG(OP, DST, SRC)                            \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ALU | BPF_OP(OP) | BPF_X,          \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = 0,                                     \
+               .imm   = 0 })
+
+/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
+
+#define BPF_ALU64_IMM(OP, DST, IMM)                            \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,        \
+               .dst_reg = DST,                                 \
+               .src_reg = 0,                                   \
+               .off   = 0,                                     \
+               .imm   = IMM })
+
+#define BPF_ALU32_IMM(OP, DST, IMM)                            \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ALU | BPF_OP(OP) | BPF_K,          \
+               .dst_reg = DST,                                 \
+               .src_reg = 0,                                   \
+               .off   = 0,                                     \
+               .imm   = IMM })
+
+/* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
+
+#define BPF_ENDIAN(TYPE, DST, LEN)                             \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ALU | BPF_END | BPF_SRC(TYPE),     \
+               .dst_reg = DST,                                 \
+               .src_reg = 0,                                   \
+               .off   = 0,                                     \
+               .imm   = LEN })
+
+/* Short form of mov, dst_reg = src_reg */
+
+#define BPF_MOV64_REG(DST, SRC)                                        \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ALU64 | BPF_MOV | BPF_X,           \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = 0,                                     \
+               .imm   = 0 })
+
+#define BPF_MOV32_REG(DST, SRC)                                        \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ALU | BPF_MOV | BPF_X,             \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = 0,                                     \
+               .imm   = 0 })
+
+/* Short form of mov, dst_reg = imm32 */
+
+#define BPF_MOV64_IMM(DST, IMM)                                        \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ALU64 | BPF_MOV | BPF_K,           \
+               .dst_reg = DST,                                 \
+               .src_reg = 0,                                   \
+               .off   = 0,                                     \
+               .imm   = IMM })
+
+#define BPF_MOV32_IMM(DST, IMM)                                        \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ALU | BPF_MOV | BPF_K,             \
+               .dst_reg = DST,                                 \
+               .src_reg = 0,                                   \
+               .off   = 0,                                     \
+               .imm   = IMM })
+
+/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
+#define BPF_LD_IMM64(DST, IMM)                                 \
+       BPF_LD_IMM64_RAW(DST, 0, IMM)
+
+#define BPF_LD_IMM64_RAW(DST, SRC, IMM)                                \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_LD | BPF_DW | BPF_IMM,             \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = 0,                                     \
+               .imm   = (__u32) (IMM) }),                      \
+       ((struct bpf_insn) {                                    \
+               .code  = 0, /* zero is reserved opcode */       \
+               .dst_reg = 0,                                   \
+               .src_reg = 0,                                   \
+               .off   = 0,                                     \
+               .imm   = ((__u64) (IMM)) >> 32 })
+
+#define BPF_PSEUDO_MAP_FD      1
+
+/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
+#define BPF_LD_MAP_FD(DST, MAP_FD)                             \
+       BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
+
+/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */
+
+#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)                     \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),   \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = 0,                                     \
+               .imm   = IMM })
+
+#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)                     \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),     \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = 0,                                     \
+               .imm   = IMM })
+
+/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */
+
+#define BPF_LD_ABS(SIZE, IMM)                                  \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,     \
+               .dst_reg = 0,                                   \
+               .src_reg = 0,                                   \
+               .off   = 0,                                     \
+               .imm   = IMM })
+
+/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */
+
+#define BPF_LD_IND(SIZE, SRC, IMM)                             \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,     \
+               .dst_reg = 0,                                   \
+               .src_reg = SRC,                                 \
+               .off   = 0,                                     \
+               .imm   = IMM })
+
+/* Memory load, dst_reg = *(uint *) (src_reg + off16) */
+
+#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)                       \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,    \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = OFF,                                   \
+               .imm   = 0 })
+
+/* Memory store, *(uint *) (dst_reg + off16) = src_reg */
+
+#define BPF_STX_MEM(SIZE, DST, SRC, OFF)                       \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,    \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = OFF,                                   \
+               .imm   = 0 })
+
+/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
+
+#define BPF_ST_MEM(SIZE, DST, OFF, IMM)                                \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,     \
+               .dst_reg = DST,                                 \
+               .src_reg = 0,                                   \
+               .off   = OFF,                                   \
+               .imm   = IMM })
+
+/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */
+
+#define BPF_JMP_REG(OP, DST, SRC, OFF)                         \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_JMP | BPF_OP(OP) | BPF_X,          \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = OFF,                                   \
+               .imm   = 0 })
+
+/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
+
+#define BPF_JMP_IMM(OP, DST, IMM, OFF)                         \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_JMP | BPF_OP(OP) | BPF_K,          \
+               .dst_reg = DST,                                 \
+               .src_reg = 0,                                   \
+               .off   = OFF,                                   \
+               .imm   = IMM })
+
+/* Function call */
+
+#define BPF_EMIT_CALL(FUNC)                                    \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_JMP | BPF_CALL,                    \
+               .dst_reg = 0,                                   \
+               .src_reg = 0,                                   \
+               .off   = 0,                                     \
+               .imm   = ((FUNC) - __bpf_call_base) })
+
+/* Raw code statement block */
+
+#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)                 \
+       ((struct bpf_insn) {                                    \
+               .code  = CODE,                                  \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = OFF,                                   \
+               .imm   = IMM })
+
+/* Program exit */
+
+#define BPF_EXIT_INSN()                                                \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_JMP | BPF_EXIT,                    \
+               .dst_reg = 0,                                   \
+               .src_reg = 0,                                   \
+               .off   = 0,                                     \
+               .imm   = 0 })
+
 struct bpf_insn {
        __u8    code;           /* opcode */
        __u8    dst_reg:4;      /* dest register */
-- 
1.7.9.5
