[RFC PATCH v2 tip 1/7] Extended BPF core framework

2014-02-05 Thread Alexei Starovoitov
Extended BPF (or 64-bit BPF) is an instruction set for creating safe,
dynamically loadable filters that can call a fixed set of kernel
functions and take a generic bpf_context as input.
A BPF filter is the glue between kernel functions and the bpf_context.
Different kernel subsystems can define their own set of available
functions and tailor the BPF machinery to their specific use case.

include/linux/bpf.h - instruction set definition
kernel/bpf_jit/bpf_check.c - code safety checker/static analyzer
kernel/bpf_jit/bpf_run.c - emulator for archs without BPF64_JIT

The extended BPF instruction set is designed to map efficiently to
native instructions on 64-bit CPUs.
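
As an editorial illustration only (not part of the patch), a single
instruction in this format can be built either by filling the 8-byte
struct bpf_insn directly or through the BPF_INSN_* constructor macros
defined in include/linux/bpf.h below; the register numbers here are
arbitrary:

	/* sketch: a += x, with a = reg 1 and x = reg 2 */
	struct bpf_insn add_rr = {
		.code  = BPF_ALU | BPF_ADD | BPF_X, /* ALU class, ADD op, operand in x_reg */
		.a_reg = 1,                         /* destination register */
		.x_reg = 2,                         /* source register */
	};

	/* the same instruction via the constructor macro */
	struct bpf_insn add_rr_macro = BPF_INSN_ALU(BPF_ADD, 1, 2);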

Signed-off-by: Alexei Starovoitov <a...@plumgrid.com>
---
 include/linux/bpf.h        |  149 +++
 include/linux/bpf_jit.h    |  134 ++
 kernel/Makefile            |    1 +
 kernel/bpf_jit/Makefile    |    3 +
 kernel/bpf_jit/bpf_check.c | 1054 
 kernel/bpf_jit/bpf_run.c   |  511 +
 lib/Kconfig.debug          |   15 +
 7 files changed, 1867 insertions(+)
 create mode 100644 include/linux/bpf.h
 create mode 100644 include/linux/bpf_jit.h
 create mode 100644 kernel/bpf_jit/Makefile
 create mode 100644 kernel/bpf_jit/bpf_check.c
 create mode 100644 kernel/bpf_jit/bpf_run.c

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
new file mode 100644
index 000..a4e18e9
--- /dev/null
+++ b/include/linux/bpf.h
@@ -0,0 +1,149 @@
+/* 64-bit BPF is Copyright (c) 2011-2014, PLUMgrid, http://plumgrid.com */
+
+#ifndef __LINUX_BPF_H__
+#define __LINUX_BPF_H__
+
+#include <linux/types.h>
+
+struct bpf_insn {
+   __u8    code;    /* opcode */
+   __u8    a_reg:4; /* dest register */
+   __u8    x_reg:4; /* source register */
+   __s16   off;     /* signed offset */
+   __s32   imm;     /* signed immediate constant */
+};
+
+struct bpf_table {
+   __u32   type;
+   __u32   key_size;
+   __u32   elem_size;
+   __u32   max_entries;
+   __u32   param1; /* meaning is table-dependent */
+};
+
+enum bpf_table_type {
+   BPF_TABLE_HASH = 1,
+   BPF_TABLE_LPM
+};
+
+/* maximum number of insns and tables in a BPF program */
+#define MAX_BPF_INSNS 4096
+#define MAX_BPF_TABLES 64
+#define MAX_BPF_STRTAB_SIZE 1024
+
+/* pointer to bpf_context is the first and only argument to a BPF program;
+ * its definition is use-case specific */
+struct bpf_context;
+
+/* bpf_add|sub|...: a += x
+ * bpf_mov: a = x
+ * bpf_bswap: bswap a */
+#define BPF_INSN_ALU(op, a, x) \
+   (struct bpf_insn){BPF_ALU|BPF_OP(op)|BPF_X, a, x, 0, 0}
+
+/* bpf_add|sub|...: a += imm
+ * bpf_mov: a = imm */
+#define BPF_INSN_ALU_IMM(op, a, imm) \
+   (struct bpf_insn){BPF_ALU|BPF_OP(op)|BPF_K, a, 0, 0, imm}
+
+/* a = *(uint *) (x + off) */
+#define BPF_INSN_LD(size, a, x, off) \
+   (struct bpf_insn){BPF_LDX|BPF_SIZE(size)|BPF_REL, a, x, off, 0}
+
+/* *(uint *) (a + off) = x */
+#define BPF_INSN_ST(size, a, off, x) \
+   (struct bpf_insn){BPF_STX|BPF_SIZE(size)|BPF_REL, a, x, off, 0}
+
+/* *(uint *) (a + off) = imm */
+#define BPF_INSN_ST_IMM(size, a, off, imm) \
+   (struct bpf_insn){BPF_ST|BPF_SIZE(size)|BPF_REL, a, 0, off, imm}
+
+/* lock *(uint *) (a + off) += x */
+#define BPF_INSN_XADD(size, a, off, x) \
+   (struct bpf_insn){BPF_STX|BPF_SIZE(size)|BPF_XADD, a, x, off, 0}
+
+/* if (a 'op' x) pc += off else fall through */
+#define BPF_INSN_JUMP(op, a, x, off) \
+   (struct bpf_insn){BPF_JMP|BPF_OP(op)|BPF_X, a, x, off, 0}
+
+/* if (a 'op' imm) pc += off else fall through */
+#define BPF_INSN_JUMP_IMM(op, a, imm, off) \
+   (struct bpf_insn){BPF_JMP|BPF_OP(op)|BPF_K, a, 0, off, imm}
+
+#define BPF_INSN_RET() \
+   (struct bpf_insn){BPF_RET|BPF_K, 0, 0, 0, 0}
+
+#define BPF_INSN_CALL(fn_code) \
+   (struct bpf_insn){BPF_JMP|BPF_CALL, 0, 0, 0, fn_code}
+
+/* Instruction classes */
+#define BPF_CLASS(code) ((code) & 0x07)
+#define BPF_LD  0x00
+#define BPF_LDX 0x01
+#define BPF_ST  0x02
+#define BPF_STX 0x03
+#define BPF_ALU 0x04
+#define BPF_JMP 0x05
+#define BPF_RET 0x06
+
+/* ld/ldx fields */
+#define BPF_SIZE(code)  ((code) & 0x18)
+#define BPF_W   0x00
+#define BPF_H   0x08
+#define BPF_B   0x10
+#define BPF_DW  0x18
+#define BPF_MODE(code)  ((code) & 0xe0)
+#define BPF_IMM 0x00
+#define BPF_ABS 0x20
+#define BPF_IND 0x40
+#define BPF_MEM 0x60
+#define BPF_LEN 0x80
+#define BPF_MSH 0xa0
+#define BPF_REL 0xc0
+#define BPF_XADD    0xe0 /* exclusive add */
+
+/* alu/jmp fields */
+#define BPF_OP(code)    ((code) & 0xf0)
+#define BPF_ADD 0x00
+#define BPF_SUB 0x10
+#define BPF_MUL 0x20
+#define BPF_DIV 0x30
+#define   
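
As an editorial sketch (not part of the patch), the field macros above
split an opcode byte back into its class, size and mode; for example,
BPF_STX|BPF_W|BPF_XADD is the word-sized atomic-add store that
BPF_INSN_XADD() emits:

	__u8 code  = BPF_STX | BPF_SIZE(BPF_W) | BPF_XADD;

	__u8 class = BPF_CLASS(code); /* BPF_STX  (0x03): store from register    */
	__u8 size  = BPF_SIZE(code);  /* BPF_W    (0x00): 32-bit access          */
	__u8 mode  = BPF_MODE(code);  /* BPF_XADD (0xe0): exclusive (atomic) add */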

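Finally, an editorial sketch (not part of the patch) of a small program
assembled from the BPF_INSN_* constructors.  The register numbers and
the context offset are made up for illustration, reg 1 is assumed to
hold the bpf_context pointer, and BPF_JEQ is the classic-BPF equality
opcode assumed to carry over into the truncated part of the header; how
such a program is loaded and verified is the job of bpf_check.c and
later patches in the series:

	/* pseudo-C:
	 *   reg2 = *(u32 *)(reg1 + 8);     read a word from the context
	 *   if (reg2 != 0)
	 *           reg2 += 1;
	 *   return;
	 */
	struct bpf_insn prog[] = {
		BPF_INSN_LD(BPF_W, 2, 1, 8),         /* reg2 = *(u32 *)(reg1 + 8) */
		BPF_INSN_JUMP_IMM(BPF_JEQ, 2, 0, 1), /* if (reg2 == 0) skip next insn */
		BPF_INSN_ALU_IMM(BPF_ADD, 2, 1),     /* reg2 += 1 */
		BPF_INSN_RET(),                      /* return to the caller */
	};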