Module Name: src Committed By: maxv Date: Fri Apr 24 16:27:28 UTC 2020
Modified Files: src/sys/arch/amd64/amd64: netbsd32_machdep.c src/sys/arch/amd64/include: gdt.h src/sys/arch/i386/include: gdt.h src/sys/arch/x86/include: pmap.h src/sys/arch/x86/x86: pmap.c svs.c sys_machdep.c Log Message: Give the ldt a fixed size of one page (512 slots), and drop the variable-sized mechanism that was too complex. This fixes a race between USER_LDT and SVS: during context switches, the way SVS installs the new ldt relies on the ldt pointer AND the ldt size, but both cannot be accessed atomically at the same time. To generate a diff of this commit: cvs rdiff -u -r1.134 -r1.135 src/sys/arch/amd64/amd64/netbsd32_machdep.c cvs rdiff -u -r1.10 -r1.11 src/sys/arch/amd64/include/gdt.h cvs rdiff -u -r1.16 -r1.17 src/sys/arch/i386/include/gdt.h cvs rdiff -u -r1.117 -r1.118 src/sys/arch/x86/include/pmap.h cvs rdiff -u -r1.381 -r1.382 src/sys/arch/x86/x86/pmap.c cvs rdiff -u -r1.32 -r1.33 src/sys/arch/x86/x86/svs.c cvs rdiff -u -r1.53 -r1.54 src/sys/arch/x86/x86/sys_machdep.c Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.
Modified files: Index: src/sys/arch/amd64/amd64/netbsd32_machdep.c diff -u src/sys/arch/amd64/amd64/netbsd32_machdep.c:1.134 src/sys/arch/amd64/amd64/netbsd32_machdep.c:1.135 --- src/sys/arch/amd64/amd64/netbsd32_machdep.c:1.134 Thu Apr 23 16:16:14 2020 +++ src/sys/arch/amd64/amd64/netbsd32_machdep.c Fri Apr 24 16:27:27 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: netbsd32_machdep.c,v 1.134 2020/04/23 16:16:14 christos Exp $ */ +/* $NetBSD: netbsd32_machdep.c,v 1.135 2020/04/24 16:27:27 maxv Exp $ */ /* * Copyright (c) 2001 Wasabi Systems, Inc. @@ -36,7 +36,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: netbsd32_machdep.c,v 1.134 2020/04/23 16:16:14 christos Exp $"); +__KERNEL_RCSID(0, "$NetBSD: netbsd32_machdep.c,v 1.135 2020/04/24 16:27:27 maxv Exp $"); #ifdef _KERNEL_OPT #include "opt_compat_netbsd.h" @@ -74,6 +74,7 @@ __KERNEL_RCSID(0, "$NetBSD: netbsd32_mac #include <machine/netbsd32_machdep.h> #include <machine/sysarch.h> #include <machine/userret.h> +#include <machine/gdt.h> #include <compat/netbsd32/netbsd32.h> #include <compat/netbsd32/netbsd32_exec.h> @@ -628,7 +629,7 @@ x86_64_set_ldt32(struct lwp *l, void *ar ua.start = ua32.start; ua.num = ua32.num; - if (ua.num < 0 || ua.num > 8192) + if (ua.num < 0 || ua.num > MAX_USERLDT_SLOTS) return EINVAL; descv = malloc(sizeof(*descv) * ua.num, M_TEMP, M_WAITOK); @@ -656,7 +657,7 @@ x86_64_get_ldt32(struct lwp *l, void *ar ua.start = ua32.start; ua.num = ua32.num; - if (ua.num < 0 || ua.num > 8192) + if (ua.num < 0 || ua.num > MAX_USERLDT_SLOTS) return EINVAL; cp = malloc(ua.num * sizeof(union descriptor), M_TEMP, M_WAITOK); Index: src/sys/arch/amd64/include/gdt.h diff -u src/sys/arch/amd64/include/gdt.h:1.10 src/sys/arch/amd64/include/gdt.h:1.11 --- src/sys/arch/amd64/include/gdt.h:1.10 Wed Feb 8 10:08:26 2017 +++ src/sys/arch/amd64/include/gdt.h Fri Apr 24 16:27:28 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: gdt.h,v 1.10 2017/02/08 10:08:26 maxv Exp $ */ +/* $NetBSD: gdt.h,v 1.11 2020/04/24 16:27:28 maxv Exp $ */ /*- 
* Copyright (c) 1996, 1997 The NetBSD Foundation, Inc. @@ -45,5 +45,6 @@ int ldt_alloc(void *, size_t); void ldt_free(int); #endif -#define MINGDTSIZ PAGE_SIZE -#define MAXGDTSIZ 65536 +#define MAXGDTSIZ 65536 +#define MAX_USERLDT_SIZE PAGE_SIZE +#define MAX_USERLDT_SLOTS (int)(MAX_USERLDT_SIZE / sizeof(union descriptor)) Index: src/sys/arch/i386/include/gdt.h diff -u src/sys/arch/i386/include/gdt.h:1.16 src/sys/arch/i386/include/gdt.h:1.17 --- src/sys/arch/i386/include/gdt.h:1.16 Sun Jul 2 09:02:06 2017 +++ src/sys/arch/i386/include/gdt.h Fri Apr 24 16:27:28 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: gdt.h,v 1.16 2017/07/02 09:02:06 maxv Exp $ */ +/* $NetBSD: gdt.h,v 1.17 2020/04/24 16:27:28 maxv Exp $ */ /*- * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc. @@ -44,5 +44,6 @@ void ldt_free(int); #endif /* LOCORE */ -#define MINGDTSIZ PAGE_SIZE -#define MAXGDTSIZ 65536 +#define MAXGDTSIZ 65536 +#define MAX_USERLDT_SIZE PAGE_SIZE +#define MAX_USERLDT_SLOTS (int)(MAX_USERLDT_SIZE / sizeof(union descriptor)) Index: src/sys/arch/x86/include/pmap.h diff -u src/sys/arch/x86/include/pmap.h:1.117 src/sys/arch/x86/include/pmap.h:1.118 --- src/sys/arch/x86/include/pmap.h:1.117 Sun Apr 5 00:21:11 2020 +++ src/sys/arch/x86/include/pmap.h Fri Apr 24 16:27:28 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: pmap.h,v 1.117 2020/04/05 00:21:11 ad Exp $ */ +/* $NetBSD: pmap.h,v 1.118 2020/04/24 16:27:28 maxv Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. 
@@ -191,9 +191,13 @@ extern struct slotspace slotspace; #define MAXGDTSIZ 65536 /* XXX */ #endif +#ifndef MAX_USERLDT_SIZE +#define MAX_USERLDT_SIZE PAGE_SIZE /* XXX */ +#endif + struct pcpu_entry { uint8_t gdt[MAXGDTSIZ]; - uint8_t ldt[MAXGDTSIZ]; + uint8_t ldt[MAX_USERLDT_SIZE]; uint8_t tss[PAGE_SIZE]; uint8_t ist0[PAGE_SIZE]; uint8_t ist1[PAGE_SIZE]; @@ -268,8 +272,9 @@ struct pmap { #endif /* !defined(__x86_64__) */ union descriptor *pm_ldt; /* user-set LDT */ - size_t pm_ldt_len; /* size of LDT in bytes */ + size_t pm_ldt_len; /* XXX unused, remove */ int pm_ldt_sel; /* LDT selector */ + kcpuset_t *pm_cpus; /* mask of CPUs using pmap */ kcpuset_t *pm_kernel_cpus; /* mask of CPUs using kernel part of pmap */ Index: src/sys/arch/x86/x86/pmap.c diff -u src/sys/arch/x86/x86/pmap.c:1.381 src/sys/arch/x86/x86/pmap.c:1.382 --- src/sys/arch/x86/x86/pmap.c:1.381 Sun Apr 5 00:21:11 2020 +++ src/sys/arch/x86/x86/pmap.c Fri Apr 24 16:27:28 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: pmap.c,v 1.381 2020/04/05 00:21:11 ad Exp $ */ +/* $NetBSD: pmap.c,v 1.382 2020/04/24 16:27:28 maxv Exp $ */ /* * Copyright (c) 2008, 2010, 2016, 2017, 2019, 2020 The NetBSD Foundation, Inc. 
@@ -130,7 +130,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.381 2020/04/05 00:21:11 ad Exp $"); +__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.382 2020/04/24 16:27:28 maxv Exp $"); #include "opt_user_ldt.h" #include "opt_lockdebug.h" @@ -1208,7 +1208,6 @@ pmap_bootstrap(vaddr_t kva_start) kcpuset_create(&kpm->pm_kernel_cpus, true); kpm->pm_ldt = NULL; - kpm->pm_ldt_len = 0; kpm->pm_ldt_sel = GSYSSEL(GLDT_SEL, SEL_KPL); /* @@ -2857,7 +2856,6 @@ pmap_create(void) /* init the LDT */ pmap->pm_ldt = NULL; - pmap->pm_ldt_len = 0; pmap->pm_ldt_sel = GSYSSEL(GLDT_SEL, SEL_KPL); return (pmap); @@ -2952,7 +2950,7 @@ pmap_destroy(struct pmap *pmap) #ifdef USER_LDT if (pmap->pm_ldt != NULL) { /* - * no need to switch the LDT; this address space is gone, + * No need to switch the LDT; this address space is gone, * nothing is using it. * * No need to lock the pmap for ldt_free (or anything else), @@ -2963,7 +2961,7 @@ pmap_destroy(struct pmap *pmap) ldt_free(pmap->pm_ldt_sel); mutex_exit(&cpu_lock); uvm_km_free(kernel_map, (vaddr_t)pmap->pm_ldt, - pmap->pm_ldt_len, UVM_KMF_WIRED); + MAX_USERLDT_SIZE, UVM_KMF_WIRED); } #endif @@ -3209,7 +3207,6 @@ pmap_fork(struct pmap *pmap1, struct pma { #ifdef USER_LDT union descriptor *new_ldt; - size_t len; int sel; if (__predict_true(pmap1->pm_ldt == NULL)) { @@ -3219,18 +3216,16 @@ pmap_fork(struct pmap *pmap1, struct pma /* * Copy the LDT into the new process. * - * Read pmap1's ldt pointer and length unlocked; if it changes - * behind our back we'll retry. This will starve if there's a - * stream of LDT changes in another thread but that should not - * happen. + * Read pmap1's ldt pointer unlocked; if it changes behind our back + * we'll retry. This will starve if there's a stream of LDT changes + * in another thread but that should not happen. 
*/ - retry: +retry: if (pmap1->pm_ldt != NULL) { - len = pmap1->pm_ldt_len; /* Allocate space for the new process's LDT */ - new_ldt = (union descriptor *)uvm_km_alloc(kernel_map, len, 0, - UVM_KMF_WIRED); + new_ldt = (union descriptor *)uvm_km_alloc(kernel_map, + MAX_USERLDT_SIZE, 0, UVM_KMF_WIRED); if (new_ldt == NULL) { printf("WARNING: %s: unable to allocate LDT space\n", __func__); @@ -3238,51 +3233,48 @@ pmap_fork(struct pmap *pmap1, struct pma } mutex_enter(&cpu_lock); /* Get a GDT slot for it */ - sel = ldt_alloc(new_ldt, len); + sel = ldt_alloc(new_ldt, MAX_USERLDT_SIZE); if (sel == -1) { mutex_exit(&cpu_lock); - uvm_km_free(kernel_map, (vaddr_t)new_ldt, len, - UVM_KMF_WIRED); + uvm_km_free(kernel_map, (vaddr_t)new_ldt, + MAX_USERLDT_SIZE, UVM_KMF_WIRED); printf("WARNING: %s: unable to allocate LDT selector\n", __func__); return; } } else { /* Wasn't anything there after all. */ - len = -1; new_ldt = NULL; sel = -1; mutex_enter(&cpu_lock); } - /* If there's still something there now that we have cpu_lock... */ + /* + * Now that we have cpu_lock, ensure the LDT status is the same. + */ if (pmap1->pm_ldt != NULL) { - if (len != pmap1->pm_ldt_len) { - /* Oops, it changed. Drop what we did and try again */ - if (len != -1) { - ldt_free(sel); - uvm_km_free(kernel_map, (vaddr_t)new_ldt, - len, UVM_KMF_WIRED); - } + if (new_ldt == NULL) { + /* A wild LDT just appeared. */ mutex_exit(&cpu_lock); goto retry; } /* Copy the LDT data and install it in pmap2 */ - memcpy(new_ldt, pmap1->pm_ldt, len); + memcpy(new_ldt, pmap1->pm_ldt, MAX_USERLDT_SIZE); pmap2->pm_ldt = new_ldt; - pmap2->pm_ldt_len = pmap1->pm_ldt_len; pmap2->pm_ldt_sel = sel; - len = -1; - } - - if (len != -1) { - /* There wasn't still something there, so mop up */ - ldt_free(sel); mutex_exit(&cpu_lock); - uvm_km_free(kernel_map, (vaddr_t)new_ldt, len, - UVM_KMF_WIRED); } else { + if (new_ldt != NULL) { + /* The LDT disappeared, drop what we did. 
*/ + ldt_free(sel); + mutex_exit(&cpu_lock); + uvm_km_free(kernel_map, (vaddr_t)new_ldt, + MAX_USERLDT_SIZE, UVM_KMF_WIRED); + return; + } + + /* We're good, just leave. */ mutex_exit(&cpu_lock); } #endif /* USER_LDT */ @@ -3337,9 +3329,8 @@ void pmap_ldt_cleanup(struct lwp *l) { pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap; - union descriptor *dp = NULL; - size_t len = 0; - int sel = -1; + union descriptor *ldt; + int sel; if (__predict_true(pmap->pm_ldt == NULL)) { return; @@ -3348,14 +3339,13 @@ pmap_ldt_cleanup(struct lwp *l) mutex_enter(&cpu_lock); if (pmap->pm_ldt != NULL) { sel = pmap->pm_ldt_sel; - dp = pmap->pm_ldt; - len = pmap->pm_ldt_len; + ldt = pmap->pm_ldt; pmap->pm_ldt_sel = GSYSSEL(GLDT_SEL, SEL_KPL); pmap->pm_ldt = NULL; - pmap->pm_ldt_len = 0; pmap_ldt_sync(pmap); ldt_free(sel); - uvm_km_free(kernel_map, (vaddr_t)dp, len, UVM_KMF_WIRED); + uvm_km_free(kernel_map, (vaddr_t)ldt, MAX_USERLDT_SIZE, + UVM_KMF_WIRED); } mutex_exit(&cpu_lock); } Index: src/sys/arch/x86/x86/svs.c diff -u src/sys/arch/x86/x86/svs.c:1.32 src/sys/arch/x86/x86/svs.c:1.33 --- src/sys/arch/x86/x86/svs.c:1.32 Fri Jan 31 08:55:38 2020 +++ src/sys/arch/x86/x86/svs.c Fri Apr 24 16:27:28 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: svs.c,v 1.32 2020/01/31 08:55:38 maxv Exp $ */ +/* $NetBSD: svs.c,v 1.33 2020/04/24 16:27:28 maxv Exp $ */ /* * Copyright (c) 2018-2019 The NetBSD Foundation, Inc. 
@@ -30,7 +30,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: svs.c,v 1.32 2020/01/31 08:55:38 maxv Exp $"); +__KERNEL_RCSID(0, "$NetBSD: svs.c,v 1.33 2020/04/24 16:27:28 maxv Exp $"); #include "opt_svs.h" #include "opt_user_ldt.h" @@ -474,7 +474,8 @@ cpu_svs_init(struct cpu_info *ci) #ifdef USER_LDT mutex_enter(&cpu_lock); - ci->ci_svs_ldt_sel = ldt_alloc(&pcpuarea->ent[cid].ldt, MAXGDTSIZ); + ci->ci_svs_ldt_sel = ldt_alloc(&pcpuarea->ent[cid].ldt, + MAX_USERLDT_SIZE); mutex_exit(&cpu_lock); #endif } @@ -512,13 +513,27 @@ void svs_ldt_sync(struct pmap *pmap) { struct cpu_info *ci = curcpu(); - int sel = pmap->pm_ldt_sel; + void *ldt; + int sel; KASSERT(kpreempt_disabled()); + /* + * Another LWP could concurrently modify the LDT via x86_set_ldt1(). + * The LWP will wait for pmap_ldt_sync() to finish before destroying + * the outdated LDT. + * + * We have preemption disabled here, so it is guaranteed that even + * if the LDT we are syncing is the outdated one, it is still valid. + * + * pmap_ldt_sync() will execute later once we have preemption enabled, + * and will install the new LDT. + */ + sel = atomic_load_relaxed(&pmap->pm_ldt_sel); if (__predict_false(sel != GSYSSEL(GLDT_SEL, SEL_KPL))) { - memcpy(&pcpuarea->ent[cpu_index(ci)].ldt, pmap->pm_ldt, - pmap->pm_ldt_len); + ldt = atomic_load_relaxed(&pmap->pm_ldt); + memcpy(&pcpuarea->ent[cpu_index(ci)].ldt, ldt, + MAX_USERLDT_SIZE); sel = ci->ci_svs_ldt_sel; } Index: src/sys/arch/x86/x86/sys_machdep.c diff -u src/sys/arch/x86/x86/sys_machdep.c:1.53 src/sys/arch/x86/x86/sys_machdep.c:1.54 --- src/sys/arch/x86/x86/sys_machdep.c:1.53 Tue Apr 21 20:20:39 2020 +++ src/sys/arch/x86/x86/sys_machdep.c Fri Apr 24 16:27:28 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: sys_machdep.c,v 1.53 2020/04/21 20:20:39 jdolecek Exp $ */ +/* $NetBSD: sys_machdep.c,v 1.54 2020/04/24 16:27:28 maxv Exp $ */ /* * Copyright (c) 1998, 2007, 2009, 2017 The NetBSD Foundation, Inc. 
@@ -30,7 +30,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: sys_machdep.c,v 1.53 2020/04/21 20:20:39 jdolecek Exp $"); +__KERNEL_RCSID(0, "$NetBSD: sys_machdep.c,v 1.54 2020/04/24 16:27:28 maxv Exp $"); #include "opt_mtrr.h" #include "opt_user_ldt.h" @@ -97,7 +97,7 @@ x86_get_ldt(struct lwp *l, void *args, r if ((error = copyin(args, &ua, sizeof(ua))) != 0) return error; - if (ua.num < 0 || ua.num > 8192) + if (ua.num < 0 || ua.num > MAX_USERLDT_SLOTS) return EINVAL; cp = malloc(ua.num * sizeof(union descriptor), M_TEMP, M_WAITOK); @@ -137,8 +137,9 @@ x86_get_ldt1(struct lwp *l, struct x86_g if (error) return error; - if (ua->start < 0 || ua->num < 0 || ua->start > 8192 || ua->num > 8192 || - ua->start + ua->num > 8192) + if (ua->start < 0 || ua->num < 0 || + ua->start > MAX_USERLDT_SLOTS || ua->num > MAX_USERLDT_SLOTS || + ua->start + ua->num > MAX_USERLDT_SLOTS) return EINVAL; if (ua->start * sizeof(union descriptor) < min_ldt_size) @@ -147,7 +148,7 @@ x86_get_ldt1(struct lwp *l, struct x86_g mutex_enter(&cpu_lock); if (pmap->pm_ldt != NULL) { - nldt = pmap->pm_ldt_len / sizeof(*lp); + nldt = MAX_USERLDT_SIZE / sizeof(*lp); lp = pmap->pm_ldt; } else { #ifdef __x86_64__ @@ -187,7 +188,7 @@ x86_set_ldt(struct lwp *l, void *args, r if ((error = copyin(args, &ua, sizeof(ua))) != 0) return error; - if (ua.num < 0 || ua.num > 8192) + if (ua.num < 0 || ua.num > MAX_USERLDT_SLOTS) return EINVAL; descv = malloc(sizeof (*descv) * ua.num, M_TEMP, M_WAITOK); @@ -211,7 +212,6 @@ x86_set_ldt1(struct lwp *l, struct x86_s int error, i, n, old_sel, new_sel; struct proc *p = l->l_proc; pmap_t pmap = p->p_vmspace->vm_map.pmap; - size_t old_len, new_len; union descriptor *old_ldt, *new_ldt; #ifdef __x86_64__ @@ -225,8 +225,9 @@ x86_set_ldt1(struct lwp *l, struct x86_s if (error) return error; - if (ua->start < 0 || ua->num < 0 || ua->start > 8192 || ua->num > 8192 || - ua->start + ua->num > 8192) + if (ua->start < 0 || ua->num < 0 || + ua->start > MAX_USERLDT_SLOTS || 
ua->num > MAX_USERLDT_SLOTS || + ua->start + ua->num > MAX_USERLDT_SLOTS) return EINVAL; if (ua->start * sizeof(union descriptor) < min_ldt_size) @@ -278,36 +279,22 @@ x86_set_ldt1(struct lwp *l, struct x86_s } /* - * Install selected changes. We perform a copy, write, swap dance - * here to ensure that all updates happen atomically. + * Install selected changes. */ /* Allocate a new LDT. */ - for (;;) { - new_len = (ua->start + ua->num) * sizeof(union descriptor); - new_len = uimax(new_len, pmap->pm_ldt_len); - new_len = uimax(new_len, min_ldt_size); - new_len = round_page(new_len); - new_ldt = (union descriptor *)uvm_km_alloc(kernel_map, - new_len, 0, UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_WAITVA); - mutex_enter(&cpu_lock); - if (pmap->pm_ldt_len <= new_len) { - break; - } - mutex_exit(&cpu_lock); - uvm_km_free(kernel_map, (vaddr_t)new_ldt, new_len, - UVM_KMF_WIRED); - } + new_ldt = (union descriptor *)uvm_km_alloc(kernel_map, + MAX_USERLDT_SIZE, 0, UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_WAITVA); + + mutex_enter(&cpu_lock); /* Copy existing entries, if any. */ if (pmap->pm_ldt != NULL) { old_ldt = pmap->pm_ldt; - old_len = pmap->pm_ldt_len; old_sel = pmap->pm_ldt_sel; - memcpy(new_ldt, old_ldt, old_len); + memcpy(new_ldt, old_ldt, MAX_USERLDT_SIZE); } else { old_ldt = NULL; - old_len = 0; old_sel = -1; memcpy(new_ldt, ldtstore, min_ldt_size); } @@ -318,20 +305,19 @@ x86_set_ldt1(struct lwp *l, struct x86_s } /* Allocate LDT selector. */ - new_sel = ldt_alloc(new_ldt, new_len); + new_sel = ldt_alloc(new_ldt, MAX_USERLDT_SIZE); if (new_sel == -1) { mutex_exit(&cpu_lock); - uvm_km_free(kernel_map, (vaddr_t)new_ldt, new_len, + uvm_km_free(kernel_map, (vaddr_t)new_ldt, MAX_USERLDT_SIZE, UVM_KMF_WIRED); return ENOMEM; } /* All changes are now globally visible. Swap in the new LDT. 
*/ - pmap->pm_ldt_len = new_len; - pmap->pm_ldt_sel = new_sel; + atomic_store_relaxed(&pmap->pm_ldt_sel, new_sel); /* membar_store_store for pmap_fork() to read these unlocked safely */ membar_producer(); - pmap->pm_ldt = new_ldt; + atomic_store_relaxed(&pmap->pm_ldt, new_ldt); /* Switch existing users onto new LDT. */ pmap_ldt_sync(pmap); @@ -341,7 +327,7 @@ x86_set_ldt1(struct lwp *l, struct x86_s ldt_free(old_sel); /* exit the mutex before free */ mutex_exit(&cpu_lock); - uvm_km_free(kernel_map, (vaddr_t)old_ldt, old_len, + uvm_km_free(kernel_map, (vaddr_t)old_ldt, MAX_USERLDT_SIZE, UVM_KMF_WIRED); } else { mutex_exit(&cpu_lock);