When auditd is bottlenecked (e.g., by slow disk I/O), kauditd blocks on
the netlink socket. If the wait timeout fully expires (timeo == 0),
netlink mistakenly interprets the zeroed timeout as a non-blocking
request. It then triggers netlink_overrun(), which drops the event --
completely bypassing the audit subsystem's internal retry queue -- and
falsely returns ENOBUFS to user-space, resulting in the following error:

 auditd[]: Error receiving audit netlink packet (No buffer space available)

Convert the `nonblock` argument of netlink_unicast() into a `flags`
bitmask and introduce NETLINK_UNICAST_TIMED. When a caller specifies
this flag and its timeout budget is exhausted, netlink detects the
zeroed timeo before retrying. Instead of falling through to the overrun
path, it safely frees the skb and returns -EAGAIN, allowing the audit
subsystem to gracefully re-enqueue the pending event into its internal
backlog.

Suggested-by: Steve Grubb <[email protected]>
Signed-off-by: Ricardo Robaina <[email protected]>
---
 include/linux/netlink.h  |  5 ++++-
 kernel/audit.c           |  8 ++++----
 net/netlink/af_netlink.c | 17 +++++++++++++++--
 3 files changed, 23 insertions(+), 7 deletions(-)

diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 882e9c1b6c1d..1888c8ee416a 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -226,7 +226,10 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr 
*nlh, int err,
 int netlink_has_listeners(struct sock *sk, unsigned int group);
 bool netlink_strict_get_check(struct sk_buff *skb);
 
-int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int 
nonblock);
+/* Internal flags for netlink_unicast (do not overlap with MSG_* flags) */
+#define NETLINK_UNICAST_TIMED 0x80000
+
+int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int 
flags);
 int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
                      __u32 group, gfp_t allocation);
 
diff --git a/kernel/audit.c b/kernel/audit.c
index e1d489bc2dff..005bfd9dc7a4 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -753,7 +753,7 @@ static int auditd_send_unicast_skb(struct sk_buff *skb)
        portid = ac->portid;
        rcu_read_unlock();
 
-       rc = netlink_unicast(sk, skb, portid, 0);
+       rc = netlink_unicast(sk, skb, portid, NETLINK_UNICAST_TIMED);
        put_net(net);
        if (rc < 0)
                goto err;
@@ -811,7 +811,7 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
 retry:
                /* grab an extra skb reference in case of error */
                skb_get(skb);
-               rc = netlink_unicast(sk, skb, portid, 0);
+               rc = netlink_unicast(sk, skb, portid, NETLINK_UNICAST_TIMED);
                if (rc < 0) {
                        /* send failed - try a few times unless fatal error */
                        if (++failed >= retry_limit ||
@@ -967,7 +967,7 @@ int audit_send_list_thread(void *_dest)
        audit_ctl_unlock();
 
        while ((skb = __skb_dequeue(&dest->q)) != NULL)
-               netlink_unicast(sk, skb, dest->portid, 0);
+               netlink_unicast(sk, skb, dest->portid, NETLINK_UNICAST_TIMED);
 
        put_net(dest->net);
        kfree(dest);
@@ -1020,7 +1020,7 @@ static int audit_send_reply_thread(void *arg)
 
        /* Ignore failure. It'll only happen if the sender goes away,
           because our timeout is set to infinite. */
-       netlink_unicast(audit_get_sk(reply->net), reply->skb, reply->portid, 0);
+       netlink_unicast(audit_get_sk(reply->net), reply->skb, reply->portid, 
NETLINK_UNICAST_TIMED);
        reply->skb = NULL;
        audit_free_reply(reply);
        return 0;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 2aeb0680807d..acaa96695981 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1325,14 +1325,17 @@ static int netlink_unicast_kernel(struct sock *sk, 
struct sk_buff *skb,
 }
 
 int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
-                   u32 portid, int nonblock)
+                   u32 portid, int flags)
 {
        struct sock *sk;
        int err;
        long timeo;
+       int nonblock;
 
        skb = netlink_trim(skb, gfp_any());
 
+       /* Extract blocking mode: strip internal flags, preserve MSG_DONTWAIT */
+       nonblock = flags & ~NETLINK_UNICAST_TIMED;
        timeo = sock_sndtimeo(ssk, nonblock);
 retry:
        sk = netlink_getsockbyportid(ssk, portid);
@@ -1351,8 +1354,18 @@ int netlink_unicast(struct sock *ssk, struct sk_buff 
*skb,
        }
 
        err = netlink_attachskb(sk, skb, &timeo, ssk);
-       if (err == 1)
+       if (err == 1) {
+               /* timeo may have been zeroed by schedule_timeout inside
+                * netlink_attachskb. If the caller is a timed-blocking sender
+                * (not genuinely nonblocking), don't re-enter with timeo=0 as
+                * that would misfire netlink_overrun on the next iteration.
+                */
+               if (!timeo && (flags & NETLINK_UNICAST_TIMED)) {
+                       kfree_skb(skb);
+                       return -EAGAIN;
+               }
                goto retry;
+       }
        if (err)
                return err;
 
-- 
2.53.0


Reply via email to