OpenWRT users reported a regression on ARMv6 devices after updating to the
latest HEAD, where the following tcpdump filter:

tcpdump "not ether host 3c37121a2b3c and not ether host 184ecbca2a3a \
and not ether host 14130b4d3f47 and not ether host f0f61cf440b7 \
and not ether host a84b4dedf471 and not ether host d022be17e1d7 \
and not ether host 5c497967208b and not ether host 706655784d5b"

fails with warning: "Kernel filter failed: No error information"
when using the following config:
 # CONFIG_BPF_JIT_ALWAYS_ON is not set
 CONFIG_BPF_JIT_DEFAULT_ON=y

The issue arises because commits:
1. "bpf: Fix array bounds error with may_goto" changed default runtime to
   __bpf_prog_ret0_warn when jit_requested = 1
2. "bpf: Avoid __bpf_prog_ret0_warn when jit fails" returns error when
   jit_requested = 1 but jit fails

This change restores the interpreter fallback capability for BPF programs
with a stack size <= 512 bytes when the JIT fails.

Reported-by: Felix Fietkau <n...@nbd.name>
Closes: 
https://lore.kernel.org/bpf/2e267b4b-0540-45d8-9310-e127bf95f...@nbd.name/
Fixes: 6ebc5030e0c5 ("bpf: Fix array bounds error with may_goto")
Signed-off-by: KaFai Wan <kafai....@linux.dev>
---
 kernel/bpf/core.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 5d1650af899d..f8f8ac3b5513 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2366,8 +2366,7 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx,
                                         const struct bpf_insn *insn)
 {
        /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
-        * is not working properly, or interpreter is being used when
-        * prog->jit_requested is not 0, so warn about it!
+        * is not working properly, so warn about it!
         */
        WARN_ON_ONCE(1);
        return 0;
@@ -2468,8 +2467,9 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
        return ret;
 }
 
-static void bpf_prog_select_func(struct bpf_prog *fp)
+static bool bpf_prog_select_interpreter(struct bpf_prog *fp)
 {
+       bool select_interpreter = false;
 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
        u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
        u32 idx = (round_up(stack_depth, 32) / 32) - 1;
@@ -2478,15 +2478,16 @@ static void bpf_prog_select_func(struct bpf_prog *fp)
         * But for non-JITed programs, we don't need bpf_func, so no bounds
         * check needed.
         */
-       if (!fp->jit_requested &&
-           !WARN_ON_ONCE(idx >= ARRAY_SIZE(interpreters))) {
+       if (idx < ARRAY_SIZE(interpreters)) {
                fp->bpf_func = interpreters[idx];
+               select_interpreter = true;
        } else {
                fp->bpf_func = __bpf_prog_ret0_warn;
        }
 #else
        fp->bpf_func = __bpf_prog_ret0_warn;
 #endif
+       return select_interpreter;
 }
 
 /**
@@ -2505,7 +2506,7 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog 
*fp, int *err)
        /* In case of BPF to BPF calls, verifier did all the prep
         * work with regards to JITing, etc.
         */
-       bool jit_needed = fp->jit_requested;
+       bool jit_needed = false;
 
        if (fp->bpf_func)
                goto finalize;
@@ -2514,7 +2515,8 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog 
*fp, int *err)
            bpf_prog_has_kfunc_call(fp))
                jit_needed = true;
 
-       bpf_prog_select_func(fp);
+       if (!bpf_prog_select_interpreter(fp))
+               jit_needed = true;
 
        /* eBPF JITs can rewrite the program in case constant
         * blinding is active. However, in case of error during
-- 
2.43.0


Reply via email to