From: Bobby Eshleman <[email protected]>

Refactor sock_devmem_dontneed() in preparation for supporting both
autorelease and manual token release modes.

Split the function into two parts:
- sock_devmem_dontneed(): handles input validation, token allocation,
  and copying from userspace
- sock_devmem_dontneed_autorelease(): performs the actual token release
  via xarray lookup and page pool put

This separation allows a future commit to add a parallel
sock_devmem_dontneed_manual_release() function that uses a different
token tracking mechanism (per-niov reference counting) without
duplicating the input validation logic.
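
For illustration only (not part of this patch), the shared entry point
could then pick the release path roughly as sketched below; the
manual-release helper name and the 'autorelease' parameter are
assumptions about the follow-up, not code from this series:

  static int sock_devmem_dontneed_release(struct sock *sk,
                                          struct dmabuf_token *tokens,
                                          unsigned int num_tokens,
                                          bool autorelease)
  {
          /* Both paths reuse the already-validated, already-copied tokens. */
          if (autorelease)
                  return sock_devmem_dontneed_autorelease(sk, tokens,
                                                          num_tokens);
          /* Hypothetical variant added by a later patch. */
          return sock_devmem_dontneed_manual_release(sk, tokens,
                                                     num_tokens);
  }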

The refactoring is purely mechanical with no functional change; it is
only intended to minimize noise in subsequent patches.

Reviewed-by: Mina Almasry <[email protected]>
Signed-off-by: Bobby Eshleman <[email protected]>
---
 net/core/sock.c | 52 ++++++++++++++++++++++++++++++++--------------------
 1 file changed, 32 insertions(+), 20 deletions(-)

diff --git a/net/core/sock.c b/net/core/sock.c
index 45c98bf524b2..a5932719b191 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1082,30 +1082,13 @@ static int sock_reserve_memory(struct sock *sk, int bytes)
 #define MAX_DONTNEED_FRAGS 1024
 
 static noinline_for_stack int
-sock_devmem_dontneed(struct sock *sk, sockptr_t optval, unsigned int optlen)
+sock_devmem_dontneed_autorelease(struct sock *sk, struct dmabuf_token *tokens,
+                                unsigned int num_tokens)
 {
-       unsigned int num_tokens, i, j, k, netmem_num = 0;
-       struct dmabuf_token *tokens;
+       unsigned int i, j, k, netmem_num = 0;
        int ret = 0, num_frags = 0;
        netmem_ref netmems[16];
 
-       if (!sk_is_tcp(sk))
-               return -EBADF;
-
-       if (optlen % sizeof(*tokens) ||
-           optlen > sizeof(*tokens) * MAX_DONTNEED_TOKENS)
-               return -EINVAL;
-
-       num_tokens = optlen / sizeof(*tokens);
-       tokens = kvmalloc_array(num_tokens, sizeof(*tokens), GFP_KERNEL);
-       if (!tokens)
-               return -ENOMEM;
-
-       if (copy_from_sockptr(tokens, optval, optlen)) {
-               kvfree(tokens);
-               return -EFAULT;
-       }
-
        xa_lock_bh(&sk->sk_user_frags);
        for (i = 0; i < num_tokens; i++) {
                for (j = 0; j < tokens[i].token_count; j++) {
@@ -1135,6 +1118,35 @@ sock_devmem_dontneed(struct sock *sk, sockptr_t optval, unsigned int optlen)
        for (k = 0; k < netmem_num; k++)
                WARN_ON_ONCE(!napi_pp_put_page(netmems[k]));
 
+       return ret;
+}
+
+static noinline_for_stack int
+sock_devmem_dontneed(struct sock *sk, sockptr_t optval, unsigned int optlen)
+{
+       struct dmabuf_token *tokens;
+       unsigned int num_tokens;
+       int ret;
+
+       if (!sk_is_tcp(sk))
+               return -EBADF;
+
+       if (optlen % sizeof(*tokens) ||
+           optlen > sizeof(*tokens) * MAX_DONTNEED_TOKENS)
+               return -EINVAL;
+
+       num_tokens = optlen / sizeof(*tokens);
+       tokens = kvmalloc_array(num_tokens, sizeof(*tokens), GFP_KERNEL);
+       if (!tokens)
+               return -ENOMEM;
+
+       if (copy_from_sockptr(tokens, optval, optlen)) {
+               kvfree(tokens);
+               return -EFAULT;
+       }
+
+       ret = sock_devmem_dontneed_autorelease(sk, tokens, num_tokens);
+
        kvfree(tokens);
        return ret;
 }

-- 
2.47.3
