On Thu, Aug 23, 2018 at 08:16:06AM +0200, Anton Lindqvist wrote: > Hi. > Currently kcov is enabled on a per process (pid) basis. A process with > multiple threads therefore shares the same coverage buffer which leads to > non-deterministic results. Instead, kcov should be enabled on a per > thread basis; just like how kcov behaves on Linux and FreeBSD. The > decision to trace processes made development easier initially but I'd > like to get this right now. > > visa@ gave some helpful pointers since he did similar work related to > witness and the p_sleeplocks member in struct proc. He also pointed out > that conditionally (#ifdef) adding a member to struct proc is a bad idea > since it's part of userland-visible ABI. > > Another pleasant side-effect of this change is the removed linear search > for the corresponding kcov descriptor inside __sanitizer_cov_trace_pc(). > > Survived a make release on amd64. > > Comments? OK?
Both mpi@ and visa@ spotted a problem in kcovclose(): if one thread is currently being traced and another one decides to close the /dev/kcov file descriptor. Here's an updated diff in which this problem is solved by delaying the freeing until kcov_exit() is called upon exit. Index: dev/kcov.c =================================================================== RCS file: /cvs/src/sys/dev/kcov.c,v retrieving revision 1.2 diff -u -p -r1.2 kcov.c --- dev/kcov.c 21 Aug 2018 18:06:12 -0000 1.2 +++ dev/kcov.c 24 Aug 2018 06:22:44 -0000 @@ -39,9 +39,9 @@ struct kd { KCOV_MODE_DISABLED, KCOV_MODE_INIT, KCOV_MODE_TRACE_PC, + KCOV_MODE_DYING, } kd_mode; int kd_unit; /* device minor */ - pid_t kd_pid; /* process being traced */ uintptr_t *kd_buf; /* traced coverage */ size_t kd_nmemb; size_t kd_size; @@ -52,9 +52,9 @@ struct kd { void kcovattach(int); int kd_alloc(struct kd *, unsigned long); +void kd_free(struct kd *); struct kd *kd_lookup(int); -static inline struct kd *kd_lookup_pid(pid_t); static inline int inintr(void); TAILQ_HEAD(, kd) kd_list = TAILQ_HEAD_INITIALIZER(kd_list); @@ -69,8 +69,8 @@ int kcov_debug = 1; * each block instructions that maps to a single line in the original source * code. * - * If kcov is enabled for the current process, the executed address will be - * stored in the corresponding coverage buffer. + * If kcov is enabled for the current thread, the kernel program counter will + * be stored in its corresponding coverage buffer. * The first element in the coverage buffer holds the index of next available * element. 
*/ @@ -89,8 +89,8 @@ __sanitizer_cov_trace_pc(void) if (inintr()) return; - kd = kd_lookup_pid(curproc->p_p->ps_pid); - if (kd == NULL) + kd = curproc->p_kd; + if (kd == NULL || kd->kd_mode != KCOV_MODE_TRACE_PC) return; idx = kd->kd_buf[0]; @@ -132,9 +132,11 @@ kcovclose(dev_t dev, int flag, int mode, DPRINTF("%s: unit=%d\n", __func__, minor(dev)); - TAILQ_REMOVE(&kd_list, kd, kd_entry); - free(kd->kd_buf, M_SUBPROC, kd->kd_size); - free(kd, M_SUBPROC, sizeof(struct kd)); + if (kd->kd_mode == KCOV_MODE_TRACE_PC) + kd->kd_mode = KCOV_MODE_DYING; + else + kd_free(kd); + return (0); } @@ -159,30 +161,30 @@ kcovioctl(dev_t dev, u_long cmd, caddr_t kd->kd_mode = KCOV_MODE_INIT; break; case KIOENABLE: - if (kd->kd_mode != KCOV_MODE_INIT) { + /* Only one kcov descriptor can be enabled per thread. */ + if (p->p_kd != NULL || kd->kd_mode != KCOV_MODE_INIT) { error = EBUSY; break; } kd->kd_mode = KCOV_MODE_TRACE_PC; - kd->kd_pid = p->p_p->ps_pid; + p->p_kd = kd; break; case KIODISABLE: - /* Only the enabled process may disable itself. */ - if (kd->kd_pid != p->p_p->ps_pid || - kd->kd_mode != KCOV_MODE_TRACE_PC) { + /* Only the enabled thread may disable itself. 
*/ + if (p->p_kd != kd || kd->kd_mode != KCOV_MODE_TRACE_PC) { error = EBUSY; break; } kd->kd_mode = KCOV_MODE_INIT; - kd->kd_pid = 0; + p->p_kd = NULL; break; default: error = EINVAL; DPRINTF("%s: %lu: unknown command\n", __func__, cmd); } - DPRINTF("%s: unit=%d, mode=%d, pid=%d, error=%d\n", - __func__, kd->kd_unit, kd->kd_mode, kd->kd_pid, error); + DPRINTF("%s: unit=%d, mode=%d, error=%d\n", + __func__, kd->kd_unit, kd->kd_mode, error); return (error); } @@ -212,12 +214,17 @@ kcov_exit(struct proc *p) { struct kd *kd; - kd = kd_lookup_pid(p->p_p->ps_pid); + kd = p->p_kd; if (kd == NULL) return; - kd->kd_mode = KCOV_MODE_INIT; - kd->kd_pid = 0; + DPRINTF("%s: unit=%d\n", __func__, kd->kd_unit); + + if (kd->kd_mode == KCOV_MODE_DYING) + kd_free(kd); + else + kd->kd_mode = KCOV_MODE_INIT; + p->p_kd = NULL; } struct kd * @@ -250,16 +257,14 @@ kd_alloc(struct kd *kd, unsigned long nm return (0); } -static inline struct kd * -kd_lookup_pid(pid_t pid) +void +kd_free(struct kd *kd) { - struct kd *kd; + DPRINTF("%s: unit=%d mode=%d\n", __func__, kd->kd_unit, kd->kd_mode); - TAILQ_FOREACH(kd, &kd_list, kd_entry) { - if (kd->kd_pid == pid && kd->kd_mode == KCOV_MODE_TRACE_PC) - return (kd); - } - return (NULL); + TAILQ_REMOVE(&kd_list, kd, kd_entry); + free(kd->kd_buf, M_SUBPROC, kd->kd_size); + free(kd, M_SUBPROC, sizeof(struct kd)); } static inline int Index: kern/kern_exit.c =================================================================== RCS file: /cvs/src/sys/kern/kern_exit.c,v retrieving revision 1.168 diff -u -p -r1.168 kern_exit.c --- kern/kern_exit.c 21 Aug 2018 18:06:12 -0000 1.168 +++ kern/kern_exit.c 24 Aug 2018 06:22:44 -0000 @@ -181,6 +181,10 @@ exit1(struct proc *p, int rv, int flags) } p->p_siglist = 0; +#if NKCOV > 0 + kcov_exit(p); +#endif + if ((p->p_flag & P_THREAD) == 0) { /* close open files and release open-file table */ fdfree(p); @@ -192,10 +196,6 @@ exit1(struct proc *p, int rv, int flags) killjobc(pr); #ifdef ACCOUNTING acct_process(p); 
-#endif - -#if NKCOV > 0 - kcov_exit(p); #endif #ifdef KTRACE Index: kern/kern_fork.c =================================================================== RCS file: /cvs/src/sys/kern/kern_fork.c,v retrieving revision 1.205 diff -u -p -r1.205 kern_fork.c --- kern/kern_fork.c 20 Jul 2018 07:28:36 -0000 1.205 +++ kern/kern_fork.c 24 Aug 2018 06:22:44 -0000 @@ -65,6 +65,8 @@ #include <uvm/uvm.h> #include <machine/tcb.h> +#include "kcov.h" + int nprocesses = 1; /* process 0 */ int nthreads = 1; /* proc 0 */ int randompid; /* when set to 1, pid's go random */ @@ -176,6 +178,10 @@ thread_new(struct proc *parent, vaddr_t #ifdef WITNESS p->p_sleeplocks = NULL; +#endif + +#if NKCOV > 0 + p->p_kd = NULL; #endif return p; Index: sys/proc.h =================================================================== RCS file: /cvs/src/sys/sys/proc.h,v retrieving revision 1.256 diff -u -p -r1.256 proc.h --- sys/proc.h 13 Aug 2018 15:26:17 -0000 1.256 +++ sys/proc.h 24 Aug 2018 06:22:44 -0000 @@ -288,6 +288,7 @@ struct process { "\024NOBROADCASTKILL" "\025PLEDGE" "\026WXNEEDED" "\027EXECPLEDGE" ) +struct kd; struct lock_list_entry; struct proc { @@ -374,6 +375,8 @@ struct proc { u_short p_xstat; /* Exit status for wait; also stop signal. */ struct lock_list_entry *p_sleeplocks; + + struct kd *p_kd; /* kcov descriptor */ }; /* Status values. */