On Fri, Jan 18, 2019 at 04:58:16PM +0100, Alexander Bluhm wrote:
> On Fri, Jan 18, 2019 at 04:38:55PM +0100, Anton Lindqvist wrote:
> > > - You allocate nmemb * sizeof(uintptr_t) bytes.
> > > - kd_buf[nmemb - 1] is the largest valid index.
> > > - kd_nmemb is nmemb - 1.
> > > - kd_buf[idx * 4 + 4] is the access at the largest position. 
> > > - So return if (idx * 4 + 4 > kd_nmemb).
> > 
> > You're right, I forgot about the `kd_nmemb = nmemb - 1' initialization.
> > I went ahead and matched the existing style in
> > __sanitizer_cov_trace_pc() but inverting the conditional could be done
> > later.
> 
> As long as the syle is the same, it is fine.  Then I have to think
> about it only once.
> 
> > +   idx = kd->kd_buf[0];
> > +   if (idx * 4 + 4 < kd->kd_nmemb) {
> > +           kd->kd_buf[idx * 4 + 1] = type;
> > +           kd->kd_buf[idx * 4 + 2] = arg1;
> > +           kd->kd_buf[idx * 4 + 3] = arg2;
> > +           kd->kd_buf[idx * 4 + 4] = pc;
> > +           kd->kd_buf[0] = idx + 1;
> > +   }
> > +}
> 
> You changed an off by two to an off by one.  Compare trace_cmp()
> with __sanitizer_cov_trace_pc().
> 
>         idx = kd->kd_buf[0];
>         if (idx < kd->kd_nmemb) {
>                 kd->kd_buf[idx + 1] = (uintptr_t)__builtin_return_address(0);
>                 kd->kd_buf[0] = idx + 1;
>         }
> 
> One of them must be wrong as the largest index accessed and the
> index in the check differ.  I would prefer "if (idx * 4 + 4 <=
> kd->kd_nmemb)" and "if (idx + 1 <= kd->kd_nmemb)".  The latter is
> equivalent to the existing check.

You're right, updated diff.

Index: share/man/man4/kcov.4
===================================================================
RCS file: /cvs/src/share/man/man4/kcov.4,v
retrieving revision 1.6
diff -u -p -r1.6 kcov.4
--- share/man/man4/kcov.4       27 Dec 2018 19:33:08 -0000      1.6
+++ share/man/man4/kcov.4       18 Jan 2019 18:26:17 -0000
@@ -62,9 +62,35 @@ Enable code coverage tracing for the cur
 The
 .Fa mode
 must be one of the following:
-.Bl -tag -width KCOV_MODE_TRACE_PC
+.Bl -ohang
 .It Dv KCOV_MODE_TRACE_PC
 Trace the kernel program counter.
+.It Dv KCOV_MODE_TRACE_CMP
+Trace comparison instructions and switch statements.
+For switch statements, the number of traced comparison instructions is equal to
+the number of switch cases.
+Each traced comparison instruction is represented by 4 entries in the coverage
+buffer:
+.Bl -enum
+.It
+A mask where the least significant bit is set if one of the comparison operands
+is a compile-time constant, which is always true for switch statements.
+The remaining bits represent the log2 size of the operands, ranging from 0 to
+3.
+.It
+First comparison operand.
+For switch statements, this operand corresponds to the case value.
+.It
+Second comparison operand.
+For switch statements, this operand corresponds to the value passed to switch.
+.It
+Kernel program counter where the comparison instruction took place.
+.El
+.Pp
+In this mode, the first entry in the coverage buffer reflects the number of
+traced comparison instructions.
+Thus, the effective number of entries in the coverage buffer is given by
+multiplying the first entry by 4.
 .El
 .It Dv KIODISABLE Fa void
 Disable code coverage tracing for the current thread.
Index: sys/arch/amd64/conf/Makefile.amd64
===================================================================
RCS file: /cvs/src/sys/arch/amd64/conf/Makefile.amd64,v
retrieving revision 1.108
diff -u -p -r1.108 Makefile.amd64
--- sys/arch/amd64/conf/Makefile.amd64  7 Jan 2019 20:24:59 -0000       1.108
+++ sys/arch/amd64/conf/Makefile.amd64  18 Jan 2019 18:26:17 -0000
@@ -95,7 +95,7 @@ LINKFLAGS+=   -S
 .endif
 
 .if ${SYSTEM_OBJ:Mkcov.o} && ${COMPILER_VERSION:Mclang}
-PROF=          -fsanitize-coverage=trace-pc
+PROF=          -fsanitize-coverage=trace-pc,trace-cmp
 .endif
 
 %LOAD
@@ -152,7 +152,7 @@ vers.o: ${SYSTEM_DEP:Ngap.o}
 
 .if ${SYSTEM_OBJ:Mkcov.o} && ${COMPILER_VERSION:Mclang}
 kcov.o: $S/dev/kcov.c
-       ${NORMAL_C} -fno-sanitize-coverage=trace-pc
+       ${NORMAL_C} -fno-sanitize-coverage=trace-pc,trace-cmp
 .endif
 
 clean:
Index: sys/arch/i386/conf/Makefile.i386
===================================================================
RCS file: /cvs/src/sys/arch/i386/conf/Makefile.i386,v
retrieving revision 1.130
diff -u -p -r1.130 Makefile.i386
--- sys/arch/i386/conf/Makefile.i386    30 Oct 2018 11:08:30 -0000      1.130
+++ sys/arch/i386/conf/Makefile.i386    18 Jan 2019 18:26:17 -0000
@@ -97,7 +97,7 @@ LINKFLAGS+=   -S
 .endif
 
 .if ${SYSTEM_OBJ:Mkcov.o} && ${COMPILER_VERSION:Mclang}
-PROF=          -fsanitize-coverage=trace-pc
+PROF=          -fsanitize-coverage=trace-pc,trace-cmp
 .endif
 
 %LOAD
@@ -148,7 +148,7 @@ vers.o: ${SYSTEM_DEP:Ngap.o}
 
 .if ${SYSTEM_OBJ:Mkcov.o} && ${COMPILER_VERSION:Mclang}
 kcov.o: $S/dev/kcov.c
-       ${NORMAL_C} -fno-sanitize-coverage=trace-pc
+       ${NORMAL_C} -fno-sanitize-coverage=trace-pc,trace-cmp
 .endif
 
 clean:
Index: sys/dev/kcov.c
===================================================================
RCS file: /cvs/src/sys/dev/kcov.c,v
retrieving revision 1.10
diff -u -p -r1.10 kcov.c
--- sys/dev/kcov.c      16 Jan 2019 19:27:07 -0000      1.10
+++ sys/dev/kcov.c      18 Jan 2019 18:26:17 -0000
@@ -26,6 +26,9 @@
 
 #include <uvm/uvm_extern.h>
 
+#define KCOV_CMP_CONST         0x1
+#define KCOV_CMP_SIZE(x)       ((x) << 1)
+
 /* #define KCOV_DEBUG */
 #ifdef KCOV_DEBUG
 #define DPRINTF(x...) do { if (kcov_debug) printf(x); } while (0)
@@ -99,12 +102,140 @@ __sanitizer_cov_trace_pc(void)
                return;
 
        idx = kd->kd_buf[0];
-       if (idx < kd->kd_nmemb) {
+       if (idx + 1 <= kd->kd_nmemb) {
                kd->kd_buf[idx + 1] = (uintptr_t)__builtin_return_address(0);
                kd->kd_buf[0] = idx + 1;
        }
 }
 
+/*
+ * Compiling the kernel with the `-fsanitize-coverage=trace-cmp' option will
+ * cause the following function to be called upon integer comparisons and switch
+ * statements.
+ *
+ * If kcov is enabled for the current thread, the comparison will be stored in
+ * its corresponding coverage buffer.
+ */
+void
+trace_cmp(uint64_t type, uint64_t arg1, uint64_t arg2, uintptr_t pc)
+{
+       struct kcov_dev *kd;
+       uint64_t idx;
+
+       /*
+        * Do not trace before kcovopen() has been called at least once.
+        * At this point, all secondary CPUs have booted and accessing curcpu()
+        * is safe.
+        */
+       if (kcov_cold)
+               return;
+
+       /* Do not trace in interrupts to prevent noisy coverage. */
+       if (inintr())
+               return;
+
+       kd = curproc->p_kd;
+       if (kd == NULL || kd->kd_mode != KCOV_MODE_TRACE_CMP)
+               return;
+
+       idx = kd->kd_buf[0];
+       if (idx * 4 + 4 <= kd->kd_nmemb) {
+               kd->kd_buf[idx * 4 + 1] = type;
+               kd->kd_buf[idx * 4 + 2] = arg1;
+               kd->kd_buf[idx * 4 + 3] = arg2;
+               kd->kd_buf[idx * 4 + 4] = pc;
+               kd->kd_buf[0] = idx + 1;
+       }
+}
+
+void
+__sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2)
+{
+       trace_cmp(KCOV_CMP_SIZE(0), arg1, arg2,
+           (uintptr_t)__builtin_return_address(0));
+}
+
+void
+__sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2)
+{
+       trace_cmp(KCOV_CMP_SIZE(1), arg1, arg2,
+           (uintptr_t)__builtin_return_address(0));
+}
+
+void
+__sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2)
+{
+       trace_cmp(KCOV_CMP_SIZE(2), arg1, arg2,
+           (uintptr_t)__builtin_return_address(0));
+}
+
+void
+__sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2)
+{
+       trace_cmp(KCOV_CMP_SIZE(3), arg1, arg2,
+           (uintptr_t)__builtin_return_address(0));
+}
+
+void
+__sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2)
+{
+       trace_cmp(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
+           (uintptr_t)__builtin_return_address(0));
+}
+
+void
+__sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2)
+{
+       trace_cmp(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
+           (uintptr_t)__builtin_return_address(0));
+}
+
+void
+__sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2)
+{
+       trace_cmp(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
+           (uintptr_t)__builtin_return_address(0));
+}
+
+void
+__sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2)
+{
+       trace_cmp(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
+           (uintptr_t)__builtin_return_address(0));
+}
+
+void
+__sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases)
+{
+       uint64_t i, nbits, ncases, type;
+       uintptr_t pc;
+
+       pc = (uintptr_t)__builtin_return_address(0);
+       ncases = cases[0];
+       nbits = cases[1];
+
+       switch (nbits) {
+       case 8:
+               type = KCOV_CMP_SIZE(0);
+               break;
+       case 16:
+               type = KCOV_CMP_SIZE(1);
+               break;
+       case 32:
+               type = KCOV_CMP_SIZE(2);
+               break;
+       case 64:
+               type = KCOV_CMP_SIZE(3);
+               break;
+       default:
+               return;
+       }
+       type |= KCOV_CMP_CONST;
+
+       for (i = 0; i < ncases; i++)
+               trace_cmp(type, cases[i + 2], val, pc);
+}
+
 void
 kcovattach(int count)
 {
@@ -173,7 +304,7 @@ kcovioctl(dev_t dev, u_long cmd, caddr_t
                        break;
                }
                mode = *((int *)data);
-               if (mode != KCOV_MODE_TRACE_PC) {
+               if (mode != KCOV_MODE_TRACE_PC && mode != KCOV_MODE_TRACE_CMP) {
                        error = EINVAL;
                        break;
                }
Index: sys/sys/kcov.h
===================================================================
RCS file: /cvs/src/sys/sys/kcov.h,v
retrieving revision 1.3
diff -u -p -r1.3 kcov.h
--- sys/sys/kcov.h      27 Dec 2018 19:33:08 -0000      1.3
+++ sys/sys/kcov.h      18 Jan 2019 18:26:17 -0000
@@ -27,6 +27,7 @@
 
 #define KCOV_MODE_NONE         0
 #define KCOV_MODE_TRACE_PC     1
+#define KCOV_MODE_TRACE_CMP    2
 
 #ifdef _KERNEL
 

Reply via email to