Module Name:    src
Committed By:   alnsn
Date:           Fri Jul 29 20:29:38 UTC 2016

Modified Files:
        src/sys/net: bpfjit.c

Log Message:
Don't trigger BJ_ASSERT(false) on an invalid BPF_Jxxx opcode in jmp_to_cond().

This change helps bpfjit survive AFL fuzzing without calling bpf_validate() first.

Also change the alu_to_op() function to have a similar interface.
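
With the new interface, both helpers return bool and pass the sljit
operation or condition back through an out-parameter, so callers can
fail gracefully on an opcode the helper does not recognize. A minimal
caller sketch, condensed from the committed diff below:

	int cond;

	/* jmp_to_cond() now returns false on an invalid BPF_Jxxx opcode. */
	if (!jmp_to_cond(pc, negate, &cond))
		goto fail;

	jump = sljit_emit_cmp(compiler, cond, BJ_AREG, 0,
	    kx_to_reg(pc), kx_to_reg_arg(pc));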


To generate a diff of this commit:
cvs rdiff -u -r1.45 -r1.46 src/sys/net/bpfjit.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/net/bpfjit.c
diff -u src/sys/net/bpfjit.c:1.45 src/sys/net/bpfjit.c:1.46
--- src/sys/net/bpfjit.c:1.45	Sun May 29 17:20:22 2016
+++ src/sys/net/bpfjit.c	Fri Jul 29 20:29:38 2016
@@ -1,4 +1,4 @@
-/*	$NetBSD: bpfjit.c,v 1.45 2016/05/29 17:20:22 alnsn Exp $	*/
+/*	$NetBSD: bpfjit.c,v 1.46 2016/07/29 20:29:38 alnsn Exp $	*/
 
 /*-
  * Copyright (c) 2011-2015 Alexander Nasonov.
@@ -31,9 +31,9 @@
 
 #include <sys/cdefs.h>
 #ifdef _KERNEL
-__KERNEL_RCSID(0, "$NetBSD: bpfjit.c,v 1.45 2016/05/29 17:20:22 alnsn Exp $");
+__KERNEL_RCSID(0, "$NetBSD: bpfjit.c,v 1.46 2016/07/29 20:29:38 alnsn Exp $");
 #else
-__RCSID("$NetBSD: bpfjit.c,v 1.45 2016/05/29 17:20:22 alnsn Exp $");
+__RCSID("$NetBSD: bpfjit.c,v 1.46 2016/07/29 20:29:38 alnsn Exp $");
 #endif
 
 #include <sys/types.h>
@@ -1594,10 +1594,9 @@ optimize(const bpf_ctx_t *bc, const stru
 /*
  * Convert BPF_ALU operations except BPF_NEG and BPF_DIV to sljit operation.
  */
-static int
-bpf_alu_to_sljit_op(const struct bpf_insn *pc)
+static bool
+alu_to_op(const struct bpf_insn *pc, int *res)
 {
-	const int bad = SLJIT_UNUSED;
 	const uint32_t k = pc->k;
 
 	/*
@@ -1605,49 +1604,64 @@ bpf_alu_to_sljit_op(const struct bpf_ins
 	 * instruction so SLJIT_I32_OP doesn't have any overhead.
 	 */
 	switch (BPF_OP(pc->code)) {
-	case BPF_ADD: return SLJIT_ADD;
-	case BPF_SUB: return SLJIT_SUB;
-	case BPF_MUL: return SLJIT_MUL|SLJIT_I32_OP;
-	case BPF_OR:  return SLJIT_OR;
-	case BPF_XOR: return SLJIT_XOR;
-	case BPF_AND: return SLJIT_AND;
-	case BPF_LSH: return (k > 31) ? bad : SLJIT_SHL;
-	case BPF_RSH: return (k > 31) ? bad : SLJIT_LSHR|SLJIT_I32_OP;
+	case BPF_ADD:
+		*res = SLJIT_ADD;
+		return true;
+	case BPF_SUB:
+		*res = SLJIT_SUB;
+		return true;
+	case BPF_MUL:
+		*res = SLJIT_MUL|SLJIT_I32_OP;
+		return true;
+	case BPF_OR:
+		*res = SLJIT_OR;
+		return true;
+	case BPF_XOR:
+		*res = SLJIT_XOR;
+		return true;
+	case BPF_AND:
+		*res = SLJIT_AND;
+		return true;
+	case BPF_LSH:
+		*res = SLJIT_SHL;
+		return k < 32;
+	case BPF_RSH:
+		*res = SLJIT_LSHR|SLJIT_I32_OP;
+		return k < 32;
 	default:
-		return bad;
+		return false;
 	}
 }
 
 /*
  * Convert BPF_JMP operations except BPF_JA to sljit condition.
  */
-static int
-bpf_jmp_to_sljit_cond(const struct bpf_insn *pc, bool negate)
+static bool
+jmp_to_cond(const struct bpf_insn *pc, bool negate, int *res)
 {
+
 	/*
 	 * Note: all supported 64bit arches have 32bit comparison
 	 * instructions so SLJIT_I32_OP doesn't have any overhead.
 	 */
-	int rv = SLJIT_I32_OP;
+	*res = SLJIT_I32_OP;
 
 	switch (BPF_OP(pc->code)) {
 	case BPF_JGT:
-		rv |= negate ? SLJIT_LESS_EQUAL : SLJIT_GREATER;
-		break;
+		*res |= negate ? SLJIT_LESS_EQUAL : SLJIT_GREATER;
+		return true;
 	case BPF_JGE:
-		rv |= negate ? SLJIT_LESS : SLJIT_GREATER_EQUAL;
-		break;
+		*res |= negate ? SLJIT_LESS : SLJIT_GREATER_EQUAL;
+		return true;
 	case BPF_JEQ:
-		rv |= negate ? SLJIT_NOT_EQUAL : SLJIT_EQUAL;
-		break;
+		*res |= negate ? SLJIT_NOT_EQUAL : SLJIT_EQUAL;
+		return true;
 	case BPF_JSET:
-		rv |= negate ? SLJIT_EQUAL : SLJIT_NOT_EQUAL;
-		break;
+		*res |= negate ? SLJIT_EQUAL : SLJIT_NOT_EQUAL;
+		return true;
 	default:
-		BJ_ASSERT(false);
+		return false;
 	}
-
-	return rv;
 }
 
 /*
@@ -1695,9 +1709,9 @@ generate_insn_code(struct sljit_compiler
 	struct sljit_jump *to_mchain_jump;
 
 	size_t i;
-	int status;
-	int branching, negate;
 	unsigned int rval, mode, src, op;
+	int branching, negate;
+	int status, cond, op2;
 	uint32_t jt, jf;
 
 	bool unconditional_ret;
@@ -1935,10 +1949,9 @@ generate_insn_code(struct sljit_compiler
 
 			op = BPF_OP(pc->code);
 			if (op != BPF_DIV && op != BPF_MOD) {
-				const int op2 = bpf_alu_to_sljit_op(pc);
-
-				if (op2 == SLJIT_UNUSED)
+				if (!alu_to_op(pc, &op2))
 					goto fail;
+
 				status = sljit_emit_op2(compiler,
 				    op2, BJ_AREG, 0, BJ_AREG, 0,
 				    kx_to_reg(pc), kx_to_reg_arg(pc));
@@ -2005,9 +2018,10 @@ generate_insn_code(struct sljit_compiler
 
 			if (branching) {
 				if (op != BPF_JSET) {
+					if (!jmp_to_cond(pc, negate, &cond))
+						goto fail;
 					jump = sljit_emit_cmp(compiler,
-					    bpf_jmp_to_sljit_cond(pc, negate),
-					    BJ_AREG, 0,
+					    cond, BJ_AREG, 0,
 					    kx_to_reg(pc), kx_to_reg_arg(pc));
 				} else {
 					status = sljit_emit_op2(compiler,
@@ -2018,10 +2032,10 @@ generate_insn_code(struct sljit_compiler
 					if (status != SLJIT_SUCCESS)
 						goto fail;
 
+					if (!jmp_to_cond(pc, negate, &cond))
+						goto fail;
 					jump = sljit_emit_cmp(compiler,
-					    bpf_jmp_to_sljit_cond(pc, negate),
-					    BJ_TMP1REG, 0,
-					    SLJIT_IMM, 0);
+					    cond, BJ_TMP1REG, 0, SLJIT_IMM, 0);
 				}
 
 				if (jump == NULL)
