On 4/4/24 17:43, Alexander Lobakin wrote:
Add NUMA-aware counterparts for kvmalloc_array() and kvcalloc() to be
able to flexibly allocate arrays for a particular node.
Rewrite kvmalloc_array() as a call to kvmalloc_array_node() with NUMA_NO_NODE.

Signed-off-by: Alexander Lobakin <[email protected]>
---
  include/linux/slab.h | 17 +++++++++++++++--
  1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index e53cbfa18325..d1d1fa5e7983 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -774,14 +774,27 @@ static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags)
        return kvmalloc(size, flags | __GFP_ZERO);
  }
-static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
+static inline __alloc_size(1, 2) void *
+kvmalloc_array_node(size_t n, size_t size, gfp_t flags, int node)
  {
        size_t bytes;
        if (unlikely(check_mul_overflow(n, size, &bytes)))
                return NULL;
-       return kvmalloc(bytes, flags);
+       return kvmalloc_node(bytes, flags, node);
+}
+
+static inline __alloc_size(1, 2) void *
+kvmalloc_array(size_t n, size_t size, gfp_t flags)
+{
+       return kvmalloc_array_node(n, size, flags, NUMA_NO_NODE);
+}
+
+static inline __alloc_size(1, 2) void *
+kvcalloc_node(size_t n, size_t size, gfp_t flags, int node)
+{
+       return kvmalloc_array_node(n, size, flags | __GFP_ZERO, node);
  }
  static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)

Reviewed-by: Przemek Kitszel <[email protected]>

Reply via email to