The branch main has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=3b9b64457676561b8de2bec7c94c561bbd807b0c

commit 3b9b64457676561b8de2bec7c94c561bbd807b0c
Author:     Mark Johnston <[email protected]>
AuthorDate: 2025-10-16 23:12:54 +0000
Commit:     Mark Johnston <[email protected]>
CommitDate: 2025-10-16 23:13:34 +0000

    vm: Fix iterator usage in vm_thread_stack_create()
    
    After commit 7a79d0669761 we no longer hold the VM object lock when
    initializing or advancing the domainset iterator in
    vm_thread_stack_create().
    
    We could lift the lock out of vm_thread_stack_back() but that poses
    complications since we cannot allocate KVA with a VM object lock held.
    
    Instead of overriding the object's iterator, just borrow that of the
    current thread.  Kernel stacks are basically always allocated with a
    DOMAINSET_POLICY_PREFER policy, so it's not very important to maintain a
    global iterator for round-robin allocation.
    
    As a part of this, fix up flag handling: make sure we handle
    M_NOWAIT/M_WAITOK from the caller.  Delete a comment in vm_thread_new()
    which refers to a non-existent consideration (stack swapping has been
    removed).  I suspect vm_thread_new() can use M_WAITOK but opted not to
    make that change here.
    
    Reported by:    olce
    Reviewed by:    olce, alc, kib
    Fixes:          7a79d0669761 ("vm: improve kstack_object pindex calculation to avoid pindex holes")
    MFC after:      1 week
    Differential Revision:  https://reviews.freebsd.org/D52982
---
 sys/vm/vm_glue.c | 24 ++++++++----------------
 1 file changed, 8 insertions(+), 16 deletions(-)

diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index e0f1807a1b32..18d789c59281 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -441,19 +441,16 @@ vm_thread_kstack_arena_release(void *arena, vmem_addr_t addr, vmem_size_t size)
  * Create the kernel stack for a new thread.
  */
 static vm_offset_t
-vm_thread_stack_create(struct domainset *ds, int pages)
+vm_thread_stack_create(struct domainset *ds, int pages, int flags)
 {
        vm_page_t ma[KSTACK_MAX_PAGES];
        struct vm_domainset_iter di;
-       int req = VM_ALLOC_NORMAL;
-       vm_object_t obj;
+       int req;
        vm_offset_t ks;
        int domain, i;
 
-       obj = vm_thread_kstack_size_to_obj(pages);
-       if (vm_ndomains > 1)
-               obj->domain.dr_policy = ds;
-       vm_domainset_iter_page_init(&di, obj, 0, &domain, &req);
+       vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
+       req = malloc2vm_flags(flags);
        do {
                /*
                 * Get a kernel virtual address for this thread's kstack.
@@ -480,7 +477,7 @@ vm_thread_stack_create(struct domainset *ds, int pages)
                        vm_page_valid(ma[i]);
                pmap_qenter(ks, ma, pages);
                return (ks);
-       } while (vm_domainset_iter_page(&di, obj, &domain, NULL) == 0);
+       } while (vm_domainset_iter_policy(&di, &domain) == 0);
 
        return (0);
 }
@@ -532,15 +529,9 @@ vm_thread_new(struct thread *td, int pages)
        ks = 0;
        if (pages == kstack_pages && kstack_cache != NULL)
                ks = (vm_offset_t)uma_zalloc(kstack_cache, M_NOWAIT);
-
-       /*
-        * Ensure that kstack objects can draw pages from any memory
-        * domain.  Otherwise a local memory shortage can block a process
-        * swap-in.
-        */
        if (ks == 0)
                ks = vm_thread_stack_create(DOMAINSET_PREF(PCPU_GET(domain)),
-                   pages);
+                   pages, M_NOWAIT);
        if (ks == 0)
                return (0);
 
@@ -660,7 +651,8 @@ kstack_import(void *arg, void **store, int cnt, int domain, int flags)
                ds = DOMAINSET_PREF(domain);
 
        for (i = 0; i < cnt; i++) {
-               store[i] = (void *)vm_thread_stack_create(ds, kstack_pages);
+               store[i] = (void *)vm_thread_stack_create(ds, kstack_pages,
+                   flags);
                if (store[i] == NULL)
                        break;
        }
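
For readers following along, here is a minimal sketch of the per-call domainset iterator pattern the change adopts, with the caller's M_NOWAIT/M_WAITOK flags threaded through.  It is not the committed code: alloc_one_in_domain() is a placeholder for the KVA reservation and page allocation done in vm_thread_stack_create(), while the vm_domainset_iter_* and malloc2vm_flags() calls are the interfaces actually used in the diff above.

static vm_offset_t
alloc_with_policy(struct domainset *ds, int flags)
{
        struct vm_domainset_iter di;
        vm_offset_t addr;
        int domain, req;

        /*
         * Walk memory domains according to the caller's policy; the
         * iterator is also given the malloc flags so it can decide
         * whether to sleep or fail once every domain has been tried.
         */
        vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
        req = malloc2vm_flags(flags);   /* M_* -> VM_ALLOC_* request flags */
        do {
                /* Placeholder for the per-domain allocation attempt. */
                addr = alloc_one_in_domain(domain, req);
                if (addr != 0)
                        return (addr);
        } while (vm_domainset_iter_policy(&di, &domain) == 0);
        return (0);
}

Since kernel stacks are almost always allocated with a DOMAINSET_POLICY_PREFER policy, the loop normally succeeds in the preferred domain on the first pass, which is why borrowing the current thread's iterator instead of the object's shared round-robin state costs little.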
