Module Name:	src
Committed By:	matt
Date:		Wed Jul 4 11:39:42 UTC 2012
Modified Files:
	src/sys/common/pmap/tlb: pmap.c pmap.h pmap_segtab.c

Log Message:
The lockless list can lead to corruption.  Instead use a spinlock to control
access to the head of the free segtab list.


To generate a diff of this commit:
cvs rdiff -u -r1.13 -r1.14 src/sys/common/pmap/tlb/pmap.c
cvs rdiff -u -r1.12 -r1.13 src/sys/common/pmap/tlb/pmap.h
cvs rdiff -u -r1.4 -r1.5 src/sys/common/pmap/tlb/pmap_segtab.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
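For background on the change below: the old code managed the free segtab list
with compare-and-swap loops on the list head, a pattern that is exposed to the
classic ABA hazard when one CPU pops an entry while another still holds a stale
next pointer; that is presumably the corruption the log message refers to. The
new code serializes get and put under a spinlock instead. The sketch that
follows is a minimal userland analogue of that locked free-list pattern, using
POSIX threads in place of the kernel's kmutex_t/mutex_spin_enter and invented
names (struct segtab, free_head, freelist_lock, segtab_get, segtab_put); it
illustrates the shape of the fix, not the kernel code itself.

/*
 * Userland sketch of a spinlock-protected singly linked free list,
 * analogous to the pattern adopted in pmap_segtab.c.  All names here
 * are illustrative, not taken from the kernel sources.
 */
#include <pthread.h>
#include <stdio.h>

struct segtab {
	struct segtab *next;		/* plays the role of seg_tab[0] */
};

static struct segtab *free_head;		/* head of the free list */
static pthread_spinlock_t freelist_lock;	/* stands in for pmap_segtab_lock */

/* Pop one entry, or return NULL if the list is empty. */
static struct segtab *
segtab_get(void)
{
	struct segtab *stp;

	pthread_spin_lock(&freelist_lock);
	if ((stp = free_head) != NULL) {
		free_head = stp->next;	/* unlink under the lock */
		stp->next = NULL;
	}
	pthread_spin_unlock(&freelist_lock);
	return stp;
}

/* Push one entry back onto the free list. */
static void
segtab_put(struct segtab *stp)
{
	pthread_spin_lock(&freelist_lock);
	stp->next = free_head;		/* link and update head as one */
	free_head = stp;		/* critical section */
	pthread_spin_unlock(&freelist_lock);
}

int
main(void)
{
	struct segtab a, b;

	pthread_spin_init(&freelist_lock, PTHREAD_PROCESS_PRIVATE);
	segtab_put(&a);
	segtab_put(&b);
	struct segtab *first = segtab_get();
	struct segtab *second = segtab_get();
	printf("popped %p then %p (LIFO: &b then &a)\n",
	    (void *)first, (void *)second);
	pthread_spin_destroy(&freelist_lock);
	return 0;
}

The trade-off is a short spin-held critical section on every segtab
allocation and free, in exchange for having the list head and the element's
next pointer updated as a single step with respect to other CPUs.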
Modified files:

Index: src/sys/common/pmap/tlb/pmap.c
diff -u src/sys/common/pmap/tlb/pmap.c:1.13 src/sys/common/pmap/tlb/pmap.c:1.14
--- src/sys/common/pmap/tlb/pmap.c:1.13	Thu May 17 16:20:19 2012
+++ src/sys/common/pmap/tlb/pmap.c	Wed Jul 4 11:39:42 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.13 2012/05/17 16:20:19 matt Exp $	*/
+/*	$NetBSD: pmap.c,v 1.14 2012/07/04 11:39:42 matt Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.13 2012/05/17 16:20:19 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.14 2012/07/04 11:39:42 matt Exp $");
 
 /*
  *	Manages physical address maps.
@@ -456,6 +456,11 @@ pmap_init(void)
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
 
 	/*
+	 * Initialize the segtab lock.
+	 */
+	mutex_init(&pmap_segtab_lock, MUTEX_DEFAULT, IPL_HIGH);
+
+	/*
 	 * Set a low water mark on the pv_entry pool, so that we are
 	 * more likely to have these around even in extreme memory
 	 * starvation.

Index: src/sys/common/pmap/tlb/pmap.h
diff -u src/sys/common/pmap/tlb/pmap.h:1.12 src/sys/common/pmap/tlb/pmap.h:1.13
--- src/sys/common/pmap/tlb/pmap.h:1.12	Thu May 17 16:20:19 2012
+++ src/sys/common/pmap/tlb/pmap.h	Wed Jul 4 11:39:42 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.12 2012/05/17 16:20:19 matt Exp $	*/
+/*	$NetBSD: pmap.h,v 1.13 2012/07/04 11:39:42 matt Exp $	*/
 
 /*
  * Copyright (c) 1992, 1993
@@ -109,6 +109,7 @@ void pmap_pte_process(struct pmap *, vad
 void	pmap_segtab_activate(struct pmap *, struct lwp *);
 void	pmap_segtab_alloc(struct pmap *);
 void	pmap_segtab_free(struct pmap *);
+extern kmutex_t pmap_segtab_lock;
 #endif /* _KERNEL */
 
 /*

Index: src/sys/common/pmap/tlb/pmap_segtab.c
diff -u src/sys/common/pmap/tlb/pmap_segtab.c:1.4 src/sys/common/pmap/tlb/pmap_segtab.c:1.5
--- src/sys/common/pmap/tlb/pmap_segtab.c:1.4	Thu Jun 23 08:11:56 2011
+++ src/sys/common/pmap/tlb/pmap_segtab.c	Wed Jul 4 11:39:42 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap_segtab.c,v 1.4 2011/06/23 08:11:56 matt Exp $	*/
+/*	$NetBSD: pmap_segtab.c,v 1.5 2012/07/04 11:39:42 matt Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.4 2011/06/23 08:11:56 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.5 2012/07/04 11:39:42 matt Exp $");
 
 /*
  *	Manages physical address maps.
@@ -110,7 +110,7 @@ __KERNEL_RCSID(0, "$NetBSD: pmap_segtab.
 CTASSERT(NBPG >= sizeof(struct pmap_segtab));
 
 struct pmap_segtab_info {
-	struct pmap_segtab * volatile free_segtab;	/* free list kept locally */
+	struct pmap_segtab *free_segtab;	/* free list kept locally */
 #ifdef DEBUG
 	uint32_t nget_segtab;
 	uint32_t nput_segtab;
@@ -121,6 +121,8 @@ struct pmap_segtab_info {
 #endif
 } pmap_segtab_info;
 
+kmutex_t pmap_segtab_lock __cacheline_aligned;
+
 static inline struct vm_page *
 pmap_pte_pagealloc(void)
 {
@@ -162,20 +164,16 @@ pmap_segtab_alloc(pmap_t pmap)
 {
 	struct pmap_segtab *stp;
  again:
-	stp = NULL;
-	while (__predict_true(pmap_segtab_info.free_segtab != NULL)) {
-		struct pmap_segtab *next_stp;
-		stp = pmap_segtab_info.free_segtab;
-		next_stp = (struct pmap_segtab *)stp->seg_tab[0];
-		if (stp == atomic_cas_ptr(&pmap_segtab_info.free_segtab, stp, next_stp)) {
-			SEGTAB_ADD(nget, 1);
-			break;
-		}
+	mutex_spin_enter(&pmap_segtab_lock);
+	if (__predict_true((stp = pmap_segtab_info.free_segtab) != NULL)) {
+		pmap_segtab_info.free_segtab =
+		    (struct pmap_segtab *)stp->seg_tab[0];
+		stp->seg_tab[0] = NULL;
+		SEGTAB_ADD(nget, 1);
 	}
+	mutex_spin_exit(&pmap_segtab_lock);
 
-	if (__predict_true(stp != NULL)) {
-		stp->seg_tab[0] = NULL;
-	} else {
+	if (__predict_false(stp == NULL)) {
 		struct vm_page * const stp_pg = pmap_pte_pagealloc();
 
 		if (__predict_false(stp_pg == NULL)) {
@@ -200,13 +198,11 @@ pmap_segtab_alloc(pmap_t pmap)
 		/*
 		 * Now link the new segtabs into the free segtab list.
 		 */
-		for (;;) {
-			void *tmp = pmap_segtab_info.free_segtab;
-			stp[n-1].seg_tab[0] = tmp;
-			if (tmp == atomic_cas_ptr(&pmap_segtab_info.free_segtab, tmp, stp+1))
-				break;
-		}
+		mutex_spin_enter(&pmap_segtab_lock);
+		stp[n-1].seg_tab[0] = (void *)pmap_segtab_info.free_segtab;
+		pmap_segtab_info.free_segtab = stp + 1;
 		SEGTAB_ADD(nput, n - 1);
+		mutex_spin_exit(&pmap_segtab_lock);
 	}
 }
 
@@ -255,14 +251,11 @@ pmap_segtab_free(pmap_t pmap)
 	/*
 	 * Insert the the segtab into the segtab freelist.
 	 */
-	for (;;) {
-		void *tmp = pmap_segtab_info.free_segtab;
-		stp->seg_tab[0] = tmp;
-		if (tmp == atomic_cas_ptr(&pmap_segtab_info.free_segtab, tmp, stp)) {
-			SEGTAB_ADD(nput, 1);
-			break;
-		}
-	}
+	mutex_spin_enter(&pmap_segtab_lock);
+	stp->seg_tab[0] = (void *) pmap_segtab_info.free_segtab;
+	pmap_segtab_info.free_segtab = stp;
+	SEGTAB_ADD(nput, 1);
+	mutex_spin_exit(&pmap_segtab_lock);
 }
 
 /*