Hi. Currently kcov is enabled on a per-process (pid) basis. The threads of a multi-threaded process therefore share the same coverage buffer, which leads to non-deterministic results. Instead, kcov should be enabled on a per-thread basis, just like kcov behaves on Linux and FreeBSD. The decision to trace processes made development easier initially, but I'd like to get this right now.
visa@ gave some helpful pointers since he did similar work related to witness and the p_sleeplocks member in struct proc. He also pointed out that conditionally (#ifdef) adding a member to struct proc is a bad idea since it's part of userland-visible ABI. Another pleasant side-effect of this change is the removed linear search for the corresponding kcov descriptor inside __sanitizer_cov_trace_pc(). Survived a make release on amd64. Comments? OK? Index: dev/kcov.c =================================================================== RCS file: /cvs/src/sys/dev/kcov.c,v retrieving revision 1.2 diff -u -p -r1.2 kcov.c --- dev/kcov.c 21 Aug 2018 18:06:12 -0000 1.2 +++ dev/kcov.c 23 Aug 2018 06:05:58 -0000 @@ -41,7 +41,6 @@ struct kd { KCOV_MODE_TRACE_PC, } kd_mode; int kd_unit; /* device minor */ - pid_t kd_pid; /* process being traced */ uintptr_t *kd_buf; /* traced coverage */ size_t kd_nmemb; size_t kd_size; @@ -54,7 +53,6 @@ void kcovattach(int); int kd_alloc(struct kd *, unsigned long); struct kd *kd_lookup(int); -static inline struct kd *kd_lookup_pid(pid_t); static inline int inintr(void); TAILQ_HEAD(, kd) kd_list = TAILQ_HEAD_INITIALIZER(kd_list); @@ -69,8 +67,8 @@ int kcov_debug = 1; * each block instructions that maps to a single line in the original source * code. * - * If kcov is enabled for the current process, the executed address will be - * stored in the corresponding coverage buffer. + * If kcov is enabled for the current thread, the kernel program counter will + * be stored in its corresponding coverage buffer. * The first element in the coverage buffer holds the index of next available * element. 
*/ @@ -89,8 +87,8 @@ __sanitizer_cov_trace_pc(void) if (inintr()) return; - kd = kd_lookup_pid(curproc->p_p->ps_pid); - if (kd == NULL) + kd = curproc->p_kd; + if (kd == NULL || kd->kd_mode != KCOV_MODE_TRACE_PC) return; idx = kd->kd_buf[0]; @@ -132,6 +130,9 @@ kcovclose(dev_t dev, int flag, int mode, DPRINTF("%s: unit=%d\n", __func__, minor(dev)); + if (kd == p->p_kd) + p->p_kd = NULL; + TAILQ_REMOVE(&kd_list, kd, kd_entry); free(kd->kd_buf, M_SUBPROC, kd->kd_size); free(kd, M_SUBPROC, sizeof(struct kd)); @@ -159,30 +160,30 @@ kcovioctl(dev_t dev, u_long cmd, caddr_t kd->kd_mode = KCOV_MODE_INIT; break; case KIOENABLE: - if (kd->kd_mode != KCOV_MODE_INIT) { + /* Only one kcov descriptor can be enabled per thread. */ + if (p->p_kd != NULL || kd->kd_mode != KCOV_MODE_INIT) { error = EBUSY; break; } kd->kd_mode = KCOV_MODE_TRACE_PC; - kd->kd_pid = p->p_p->ps_pid; + p->p_kd = kd; break; case KIODISABLE: - /* Only the enabled process may disable itself. */ - if (kd->kd_pid != p->p_p->ps_pid || - kd->kd_mode != KCOV_MODE_TRACE_PC) { + /* Only the enabled thread may disable itself. 
*/ + if (p->p_kd != kd || kd->kd_mode != KCOV_MODE_TRACE_PC) { error = EBUSY; break; } kd->kd_mode = KCOV_MODE_INIT; - kd->kd_pid = 0; + p->p_kd = NULL; break; default: error = EINVAL; DPRINTF("%s: %lu: unknown command\n", __func__, cmd); } - DPRINTF("%s: unit=%d, mode=%d, pid=%d, error=%d\n", - __func__, kd->kd_unit, kd->kd_mode, kd->kd_pid, error); + DPRINTF("%s: unit=%d, mode=%d, error=%d\n", + __func__, kd->kd_unit, kd->kd_mode, error); return (error); } @@ -212,12 +213,14 @@ kcov_exit(struct proc *p) { struct kd *kd; - kd = kd_lookup_pid(p->p_p->ps_pid); + kd = p->p_kd; if (kd == NULL) return; + DPRINTF("%s: unit=%d\n", __func__, kd->kd_unit); + kd->kd_mode = KCOV_MODE_INIT; - kd->kd_pid = 0; + p->p_kd = NULL; } struct kd * @@ -248,18 +251,6 @@ kd_alloc(struct kd *kd, unsigned long nm kd->kd_nmemb = nmemb - 1; kd->kd_size = size; return (0); -} - -static inline struct kd * -kd_lookup_pid(pid_t pid) -{ - struct kd *kd; - - TAILQ_FOREACH(kd, &kd_list, kd_entry) { - if (kd->kd_pid == pid && kd->kd_mode == KCOV_MODE_TRACE_PC) - return (kd); - } - return (NULL); } static inline int Index: kern/kern_exit.c =================================================================== RCS file: /cvs/src/sys/kern/kern_exit.c,v retrieving revision 1.168 diff -u -p -r1.168 kern_exit.c --- kern/kern_exit.c 21 Aug 2018 18:06:12 -0000 1.168 +++ kern/kern_exit.c 23 Aug 2018 06:05:58 -0000 @@ -181,6 +181,10 @@ exit1(struct proc *p, int rv, int flags) } p->p_siglist = 0; +#if NKCOV > 0 + kcov_exit(p); +#endif + if ((p->p_flag & P_THREAD) == 0) { /* close open files and release open-file table */ fdfree(p); @@ -192,10 +196,6 @@ exit1(struct proc *p, int rv, int flags) killjobc(pr); #ifdef ACCOUNTING acct_process(p); -#endif - -#if NKCOV > 0 - kcov_exit(p); #endif #ifdef KTRACE Index: kern/kern_fork.c =================================================================== RCS file: /cvs/src/sys/kern/kern_fork.c,v retrieving revision 1.205 diff -u -p -r1.205 kern_fork.c --- kern/kern_fork.c 20 
Jul 2018 07:28:36 -0000 1.205 +++ kern/kern_fork.c 23 Aug 2018 06:05:58 -0000 @@ -65,6 +65,8 @@ #include <uvm/uvm.h> #include <machine/tcb.h> +#include "kcov.h" + int nprocesses = 1; /* process 0 */ int nthreads = 1; /* proc 0 */ int randompid; /* when set to 1, pid's go random */ @@ -176,6 +178,10 @@ thread_new(struct proc *parent, vaddr_t #ifdef WITNESS p->p_sleeplocks = NULL; +#endif + +#if NKCOV > 0 + p->p_kd = NULL; #endif return p; Index: sys/proc.h =================================================================== RCS file: /cvs/src/sys/sys/proc.h,v retrieving revision 1.256 diff -u -p -r1.256 proc.h --- sys/proc.h 13 Aug 2018 15:26:17 -0000 1.256 +++ sys/proc.h 23 Aug 2018 06:05:58 -0000 @@ -289,6 +289,7 @@ struct process { struct lock_list_entry; +struct kd; struct proc { TAILQ_ENTRY(proc) p_runq; @@ -374,6 +375,8 @@ struct proc { u_short p_xstat; /* Exit status for wait; also stop signal. */ struct lock_list_entry *p_sleeplocks; + + struct kd *p_kd; /* kcov descriptor */ }; /* Status values. */