On 9/26/2025 1:01 PM, David Hildenbrand wrote:
> On 25.09.25 23:35, Sean Christopherson wrote:
>> On Wed, Aug 27, 2025, Shivank Garg wrote:
>>> Add tests for NUMA memory policy binding and NUMA aware allocation in
>>> guest_memfd. This extends the existing selftests by adding proper
>>> validation for:
>>> - KVM GMEM set_policy and get_policy() vm_ops functionality using
>>>    mbind() and get_mempolicy()
>>> - NUMA policy application before and after memory allocation
>>>
>>> These tests help ensure NUMA support for guest_memfd works correctly.
>>>
>>> Signed-off-by: Shivank Garg <[email protected]>
>>> ---
>>>   tools/testing/selftests/kvm/Makefile.kvm      |   1 +
>>>   .../testing/selftests/kvm/guest_memfd_test.c  | 121 ++++++++++++++++++
>>>   2 files changed, 122 insertions(+)
>>>
>>> diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm
>>> index 90f03f00cb04..c46cef2a7cd7 100644
>>> --- a/tools/testing/selftests/kvm/Makefile.kvm
>>> +++ b/tools/testing/selftests/kvm/Makefile.kvm
>>> @@ -275,6 +275,7 @@ pgste-option = $(call try-run, echo 'int main(void) { return 0; }' | \
>>>       $(CC) -Werror -Wl$(comma)--s390-pgste -x c - -o "$$TMP",-Wl$(comma)--s390-pgste)
>>>     LDLIBS += -ldl
>>> +LDLIBS += -lnuma
>>
>> Hrm, this is going to be very annoying.  I don't have libnuma-dev installed
>> on any of my <too many> systems, and I doubt I'm alone.  Installing the
>> package is trivial, but I'm a little wary of foisting that requirement on
>> all KVM developers and build bots.
>>
>> I'd be especially curious what ARM and RISC-V think, as NUMA is likely a bit
>> less prevalent there.
> 
> We unconditionally use it in the mm tests for ksm and migration tests, so 
> it's not particularly odd to require it here as well.
> 
> What we do with liburing in mm selftests is to detect presence at compile 
> time and essentially make the tests behave differently based on availability 
> (see check_config.sh).
> 

I have an alternative that drops the libnuma dependency entirely: small
open-coded helpers that read /sys/devices/system/node and probe the
get_mempolicy() syscall directly. If this approach looks reasonable, could we
factor these helpers out into a common test utility for other selftests that
currently depend on libnuma?

What are your thoughts on this?
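
For comparison, my understanding is that the check_config.sh route would end
up looking roughly like this on the test side (a sketch only;
LOCAL_CONFIG_HAVE_LIBNUMA is a made-up macro that a generated local_config.h
would define when the libnuma headers are found, mirroring what mm does for
liburing):

/*
 * Sketch of the compile-time-detection alternative, for comparison only.
 * LOCAL_CONFIG_HAVE_LIBNUMA is hypothetical; a check_config.sh-style probe
 * would define it in a generated local_config.h when libnuma is present.
 */
#ifdef LOCAL_CONFIG_HAVE_LIBNUMA
#include <numa.h>
#include <numaif.h>

static void test_mbind(int fd, size_t page_size, size_t total_size)
{
	TEST_REQUIRE(numa_available() != -1 && numa_max_node() >= 1);
	/* ... NUMA policy checks from the original patch ... */
}
#else
static void test_mbind(int fd, size_t page_size, size_t total_size)
{
	pr_info("Skipping mbind test, libnuma not available at build time\n");
}
#endif

That keeps libnuma optional, but every NUMA-aware test then carries the #ifdef
split; the diff below avoids it by not needing libnuma at all: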

diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm
index c46cef2a7cd7..90f03f00cb04 100644
--- a/tools/testing/selftests/kvm/Makefile.kvm
+++ b/tools/testing/selftests/kvm/Makefile.kvm
@@ -275,7 +275,6 @@ pgste-option = $(call try-run, echo 'int main(void) { return 0; }' | \
       $(CC) -Werror -Wl$(comma)--s390-pgste -x c - -o "$$TMP",-Wl$(comma)--s390-pgste)
 
 LDLIBS += -ldl
-LDLIBS += -lnuma
 LDFLAGS += -pthread $(no-pie-option) $(pgste-option)
 
 LIBKVM_C := $(filter %.c,$(LIBKVM))
diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
index 9640d04ec293..12ce91950c44 100644
--- a/tools/testing/selftests/kvm/guest_memfd_test.c
+++ b/tools/testing/selftests/kvm/guest_memfd_test.c
@@ -7,8 +7,6 @@
 #include <stdlib.h>
 #include <string.h>
 #include <unistd.h>
-#include <numa.h>
-#include <numaif.h>
 #include <errno.h>
 #include <stdio.h>
 #include <fcntl.h>
@@ -75,9 +73,6 @@ static void test_mmap_supported(int fd, size_t page_size, size_t total_size)
        TEST_ASSERT(!ret, "munmap() should succeed.");
 }
 
-#define TEST_REQUIRE_NUMA_MULTIPLE_NODES()     \
-       TEST_REQUIRE(numa_available() != -1 && numa_max_node() >= 1)
-
 static void test_mbind(int fd, size_t page_size, size_t total_size)
 {
        unsigned long nodemask = 1; /* nid: 0 */
@@ -87,7 +82,8 @@ static void test_mbind(int fd, size_t page_size, size_t total_size)
        char *mem;
        int ret;
 
-       TEST_REQUIRE_NUMA_MULTIPLE_NODES();
+       if (!is_multi_numa_node_system())
+               return;
 
        mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        TEST_ASSERT(mem != MAP_FAILED, "mmap for mbind test should succeed");
@@ -136,7 +132,8 @@ static void test_numa_allocation(int fd, size_t page_size, size_t total_size)
        char *mem;
        int ret, i;
 
-       TEST_REQUIRE_NUMA_MULTIPLE_NODES();
+       if (!is_multi_numa_node_system())
+               return;
 
        /* Clean slate: deallocate all file space, if any */
        ret = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, total_size);
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 23a506d7eca3..ba4c316f4fef 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -12,6 +12,7 @@
 #include "linux/list.h"
 #include <linux/kernel.h>
 #include <linux/kvm.h>
+#include <linux/mempolicy.h>
 #include "linux/rbtree.h"
 #include <linux/types.h>
 
@@ -20,6 +21,7 @@
 
 #include <sys/eventfd.h>
 #include <sys/ioctl.h>
+#include <sys/syscall.h>
 
 #include <pthread.h>
 
@@ -633,6 +635,50 @@ static inline bool is_smt_on(void)
        return false;
 }
 
+#include <dirent.h>
+static inline int numa_max_node(void)
+{
+       DIR *d;
+       struct dirent *de;
+       int max_node = 0;
+
+       d = opendir("/sys/devices/system/node");
+       if (!d) {
+               /* No NUMA support or no nodes found, assume single node */
+               return 0;
+       }
+
+       while ((de = readdir(d)) != NULL) {
+               int node_id;
+               char *endptr;
+
+               if (strncmp(de->d_name, "node", 4) != 0)
+                       continue;
+
+               node_id = strtol(de->d_name + 4, &endptr, 10);
+               if (*endptr != '\0')
+                       continue;
+
+               if (node_id > max_node)
+                       max_node = node_id;
+       }
+       closedir(d);
+
+       return max_node;
+}
+
+static inline int numa_available(void)
+{
+       if (syscall(__NR_get_mempolicy, NULL, NULL, 0, 0, 0) < 0 && (errno == ENOSYS || errno == EPERM))
+               return -1;
+       return 0;
+}
+
+static inline bool is_multi_numa_node_system(void)
+{
+       return numa_available() != -1 && numa_max_node() >= 1;
+}
+
 void vm_create_irqchip(struct kvm_vm *vm);
 
 static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
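
One loose end: dropping <numaif.h> also drops libnuma's mbind() and
get_mempolicy() wrappers that the tests themselves call, which is why the
header now pulls in <sys/syscall.h> and <linux/mempolicy.h>. The tests would
additionally need something along these lines (a sketch only, not part of the
diff above; names and placement are just a suggestion):

/*
 * Sketch: raw-syscall stand-ins for the libnuma wrappers, so the existing
 * mbind()/get_mempolicy() call sites keep building without <numaif.h>.
 * Assumes <unistd.h>, <sys/syscall.h> and <linux/mempolicy.h> are already
 * included, as in the diff above.
 */
static inline long mbind(void *addr, unsigned long len, int mode,
			 const unsigned long *nodemask, unsigned long maxnode,
			 unsigned int flags)
{
	return syscall(__NR_mbind, addr, len, mode, nodemask, maxnode, flags);
}

static inline long get_mempolicy(int *mode, unsigned long *nodemask,
				 unsigned long maxnode, void *addr,
				 unsigned long flags)
{
	return syscall(__NR_get_mempolicy, mode, nodemask, maxnode, addr, flags);
}

Alternatively, the two tests could call syscall() directly at the call sites
and skip the wrappers.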
