Module Name:	src
Committed By:	thorpej
Date:		Wed Dec 26 20:08:22 UTC 2018
Modified Files:
	src/sys/kern: kern_threadpool.c

Log Message:
Use uint64_t for the unbound and per-cpu thread pool ref counts; they're
always manipulated under a lock.  Rather than bother returning EBUSY,
just assert that the ref count never overflows (if it ever does, you
have bigger problems).

To generate a diff of this commit:
cvs rdiff -u -r1.4 -r1.5 src/sys/kern/kern_threadpool.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
Modified files:

Index: src/sys/kern/kern_threadpool.c
diff -u src/sys/kern/kern_threadpool.c:1.4 src/sys/kern/kern_threadpool.c:1.5
--- src/sys/kern/kern_threadpool.c:1.4	Wed Dec 26 18:54:19 2018
+++ src/sys/kern/kern_threadpool.c	Wed Dec 26 20:08:22 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_threadpool.c,v 1.4 2018/12/26 18:54:19 thorpej Exp $	*/
+/*	$NetBSD: kern_threadpool.c,v 1.5 2018/12/26 20:08:22 thorpej Exp $	*/

 /*-
  * Copyright (c) 2014, 2018 The NetBSD Foundation, Inc.
@@ -81,7 +81,7 @@
  */

 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_threadpool.c,v 1.4 2018/12/26 18:54:19 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_threadpool.c,v 1.5 2018/12/26 20:08:22 thorpej Exp $");

 #include <sys/types.h>
 #include <sys/param.h>
@@ -161,7 +161,7 @@
 struct threadpool_unbound {
 	/* protected by threadpools_lock */
 	LIST_ENTRY(threadpool_unbound)	tpu_link;
-	unsigned int			tpu_refcnt;
+	uint64_t			tpu_refcnt;
 };

 static LIST_HEAD(, threadpool_unbound) unbound_threadpools;
@@ -198,7 +198,7 @@
 struct threadpool_percpu {
 	/* protected by threadpools_lock */
 	LIST_ENTRY(threadpool_percpu)	tpp_link;
-	unsigned int			tpp_refcnt;
+	uint64_t			tpp_refcnt;
 };

 static LIST_HEAD(, threadpool_percpu) percpu_threadpools;
@@ -428,13 +428,8 @@ threadpool_get(struct threadpool **poolp
 		}
 	}
 	KASSERT(tpu != NULL);
-	if (tpu->tpu_refcnt == UINT_MAX) {
-		mutex_exit(&threadpools_lock);
-		if (tmp != NULL)
-			threadpool_destroy(&tmp->tpu_pool, sizeof(*tpu));
-		return EBUSY;
-	}
 	tpu->tpu_refcnt++;
+	KASSERT(tpu->tpu_refcnt != 0);
 	mutex_exit(&threadpools_lock);

 	if (tmp != NULL)
@@ -463,8 +458,9 @@ threadpool_put(struct threadpool *pool,
 		TP_LOG(("%s: Last reference for pri=%d, destroying pool.\n",
 		    __func__, (int)pri));
 		threadpool_remove_unbound(tpu);
-	} else
+	} else {
 		tpu = NULL;
+	}
 	mutex_exit(&threadpools_lock);

 	if (tpu)
@@ -507,13 +503,8 @@ threadpool_percpu_get(struct threadpool_
 		}
 	}
 	KASSERT(pool_percpu != NULL);
-	if (pool_percpu->tpp_refcnt == UINT_MAX) {
-		mutex_exit(&threadpools_lock);
-		if (tmp != NULL)
-			threadpool_percpu_destroy(tmp);
-		return EBUSY;
-	}
 	pool_percpu->tpp_refcnt++;
+	KASSERT(pool_percpu->tpp_refcnt != 0);
 	mutex_exit(&threadpools_lock);

 	if (tmp != NULL)
@@ -540,8 +531,9 @@ threadpool_percpu_put(struct threadpool_
 		TP_LOG(("%s: Last reference for pri=%d, destroying pool.\n",
 		    __func__, (int)pri));
 		threadpool_remove_percpu(pool_percpu);
-	} else
+	} else {
 		pool_percpu = NULL;
+	}
 	mutex_exit(&threadpools_lock);

 	if (pool_percpu)
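
For readers skimming the diff, the pattern the change adopts is roughly the
following.  This is a minimal userland sketch, not the NetBSD code itself:
pthread_mutex_lock()/assert() stand in for the kernel's
mutex_enter()/KASSERT(), and the struct and function names (struct pool,
pool_ref, pool_unref) are hypothetical.

	#include <assert.h>
	#include <pthread.h>
	#include <stdint.h>

	struct pool {
		pthread_mutex_t	p_lock;		/* stands in for threadpools_lock */
		uint64_t	p_refcnt;	/* protected by p_lock */
	};

	/* Acquire a reference.  Cannot fail: a locked 64-bit count never wraps. */
	static void
	pool_ref(struct pool *p)
	{

		pthread_mutex_lock(&p->p_lock);
		p->p_refcnt++;
		/* Wrapping to 0 would take ~2^64 increments; assert the invariant. */
		assert(p->p_refcnt != 0);
		pthread_mutex_unlock(&p->p_lock);
	}

	/* Release a reference; returns nonzero if the caller held the last one. */
	static int
	pool_unref(struct pool *p)
	{
		int last;

		pthread_mutex_lock(&p->p_lock);
		assert(p->p_refcnt > 0);
		last = (--p->p_refcnt == 0);
		pthread_mutex_unlock(&p->p_lock);
		return last;	/* caller tears the pool down when this is nonzero */
	}

The rationale for dropping the EBUSY path: a 64-bit counter incremented only
under a lock cannot realistically overflow (even at one increment per
nanosecond, wrapping 2^64 takes on the order of 580 years), so the assertion
documents an invariant rather than guarding a reachable condition.  As with
KASSERT in non-DIAGNOSTIC kernels, assert() here compiles away under NDEBUG.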