This third origin of hook call should cover all remaining trigger paths
(e.g. page faults). Landlock eBPF programs can then make decisions
accordingly.

Signed-off-by: Mickaël Salaün <m...@digikod.net>
Cc: Alexei Starovoitov <a...@kernel.org>
Cc: Andy Lutomirski <l...@amacapital.net>
Cc: Daniel Borkmann <dan...@iogearbox.net>
Cc: Kees Cook <keesc...@chromium.org>
---
 include/uapi/linux/bpf.h |  3 ++-
 security/landlock/lsm.c  | 17 +++++++++++++++--
 2 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 12e61508f879..3cc52e51357f 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -580,7 +580,8 @@ enum landlock_hook_id {
 /* Trigger type */
 #define LANDLOCK_FLAG_ORIGIN_SYSCALL   (1 << 0)
 #define LANDLOCK_FLAG_ORIGIN_SECCOMP   (1 << 1)
-#define _LANDLOCK_FLAG_ORIGIN_MASK     ((1 << 2) - 1)
+#define LANDLOCK_FLAG_ORIGIN_INTERRUPT (1 << 2)
+#define _LANDLOCK_FLAG_ORIGIN_MASK     ((1 << 3) - 1)
 
 /* context of function access flags */
 #define _LANDLOCK_FLAG_ACCESS_MASK     ((1ULL << 0) - 1)
diff --git a/security/landlock/lsm.c b/security/landlock/lsm.c
index 000dd0c7ec3d..2a15839a08c8 100644
--- a/security/landlock/lsm.c
+++ b/security/landlock/lsm.c
@@ -17,6 +17,7 @@
 #include <linux/kernel.h> /* FIELD_SIZEOF() */
 #include <linux/landlock.h>
 #include <linux/lsm_hooks.h>
+#include <linux/preempt.h> /* in_interrupt() */
 #include <linux/seccomp.h> /* struct seccomp_* */
 #include <linux/types.h> /* uintptr_t */
 
@@ -109,6 +110,7 @@ static int landlock_run_prog(enum landlock_hook_id hook_id, __u64 args[6])
 #endif /* CONFIG_CGROUP_BPF */
        struct landlock_rule *rule;
        u32 hook_idx = get_index(hook_id);
+       u16 current_call;
 
        struct landlock_data ctx = {
                .hook = hook_id,
@@ -128,6 +130,16 @@ static int landlock_run_prog(enum landlock_hook_id hook_id, __u64 args[6])
         * prioritize fine-grained policies (i.e. per thread), and return early.
         */
 
+       if (unlikely(in_interrupt())) {
+               current_call = LANDLOCK_FLAG_ORIGIN_INTERRUPT;
+#ifdef CONFIG_SECCOMP_FILTER
+               /* bypass landlock_ret evaluation */
+               goto seccomp_int;
+#endif /* CONFIG_SECCOMP_FILTER */
+       } else {
+               current_call = LANDLOCK_FLAG_ORIGIN_SYSCALL;
+       }
+
 #ifdef CONFIG_SECCOMP_FILTER
        /* seccomp triggers and landlock_ret cleanup */
        ctx.origin = LANDLOCK_FLAG_ORIGIN_SECCOMP;
@@ -164,8 +176,9 @@ static int landlock_run_prog(enum landlock_hook_id hook_id, __u64 args[6])
                return -ret;
        ctx.cookie = 0;
 
+seccomp_int:
        /* syscall trigger */
-       ctx.origin = LANDLOCK_FLAG_ORIGIN_SYSCALL;
+       ctx.origin = current_call;
        ret = landlock_run_prog_for_syscall(hook_idx, &ctx,
                        current->seccomp.landlock_hooks);
        if (ret)
@@ -175,7 +188,7 @@ static int landlock_run_prog(enum landlock_hook_id hook_id, __u64 args[6])
 #ifdef CONFIG_CGROUP_BPF
        /* syscall trigger */
        if (cgroup_bpf_enabled) {
-               ctx.origin = LANDLOCK_FLAG_ORIGIN_SYSCALL;
+               ctx.origin = current_call;
                /* get the default cgroup associated with the current thread */
                cgrp = task_css_set(current)->dfl_cgrp;
                ret = landlock_run_prog_for_syscall(hook_idx, &ctx,
-- 
2.9.3

Reply via email to