Module Name:    src
Committed By:   riastradh
Date:           Sat Jan 16 02:20:00 UTC 2021

Modified Files:
        src/sys/kern: kern_clock.c

Log Message:
entropy: Sample cycle counter or timecounter in hardclock.

Only do so when we're short on entropy, in order to minimize
performance impact.
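
As an illustration of the gating pattern, here is a minimal
userland sketch: the entropy subsystem's callback publishes how
many samples it wants, and the per-tick hook bails out with a
single relaxed load when nothing is needed.  The clockrnd names
mirror the committed code below; everything else -- take_sample(),
the C11 <stdatomic.h> calls standing in for the kernel's
atomic_*_relaxed operations, and the main() driver -- is invented
for the sketch and is not kernel API.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>
#include <time.h>

#define NBBY 8          /* bits per byte, as in <sys/types.h> */

struct clockrnd {
        atomic_uint needed;     /* samples still wanted; 0 = idle */
};

/* Stand-in for entering a timestamped sample into the pool. */
static void
take_sample(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        printf("sampled at %ld ns\n", (long)ts.tv_nsec);
}

/* Entropy-subsystem callback: request more samples. */
static void
clockrnd_get(size_t nbytes, void *cookie)
{
        struct clockrnd *c = cookie;

        /* Ask for 2*NBBY samples per byte, crediting well under
           one bit of entropy per sample. */
        atomic_store_explicit(&c->needed,
            (unsigned)(2 * NBBY * nbytes), memory_order_relaxed);
}

/* Per-tick hook: one cheap load when idle, sample otherwise. */
static void
clockrnd_sample(struct clockrnd *c)
{

        if (atomic_load_explicit(&c->needed,
            memory_order_relaxed) == 0)
                return;
        take_sample();
        atomic_fetch_sub_explicit(&c->needed, 1,
            memory_order_relaxed);
}

int
main(void)
{
        struct clockrnd c = { .needed = 0 };
        int i;

        clockrnd_get(1, &c);            /* pool wants 1 byte */
        for (i = 0; i < 20; i++)        /* pretend clock ticks */
                clockrnd_sample(&c);
        return 0;
}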

The sampling should happen as close as possible to the actual
hardclock timer interrupt, so that the oscillator driving that
interrupt determines when we sample the cycle counter or
timecounter, which we hope is driven by an independent oscillator.
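
Concretely, the entropy is the jitter between the two clocks: the
timer oscillator decides when the read happens, and the low-order
bits of whatever counter we read at that instant are unpredictable
to the extent that the two oscillators drift independently.  Below
is a rough userland illustration of the idea, with setitimer(2)
and SIGALRM as stand-ins for the hardclock interrupt and
CLOCK_MONOTONIC as a stand-in for the cycle counter; the period is
steady, but the low bits of the deltas wobble.

#include <signal.h>
#include <stdio.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>

#define NSAMPLES 16

static volatile sig_atomic_t nticks;
static unsigned samples[NSAMPLES];

/* The handler plays the role of hardclock: the timer decides
   when we run; we grab the finest-grained clock available.
   (clock_gettime is async-signal-safe; samples[] is only read
   in main() after all ticks have fired.) */
static void
on_tick(int signo)
{
        struct timespec ts;

        (void)signo;
        if (nticks < NSAMPLES) {
                clock_gettime(CLOCK_MONOTONIC, &ts);
                /* Keep full nanoseconds mod 2^32 so unsigned
                   subtraction yields correct deltas. */
                samples[nticks] = (unsigned)
                    (ts.tv_sec * 1000000000ULL + ts.tv_nsec);
        }
        nticks++;
}

int
main(void)
{
        struct itimerval it = {
                .it_interval = { .tv_sec = 0, .tv_usec = 10000 },
                .it_value    = { .tv_sec = 0, .tv_usec = 10000 },
        };
        int i;

        signal(SIGALRM, on_tick);
        setitimer(ITIMER_REAL, &it, NULL);
        while (nticks < NSAMPLES)
                pause();

        /* Deltas hover near 10 ms, but their low bits jitter. */
        for (i = 1; i < NSAMPLES; i++)
                printf("delta %u ns, low byte 0x%02x\n",
                    samples[i] - samples[i - 1],
                    (samples[i] - samples[i - 1]) & 0xff);
        return 0;
}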

If we used a callout, there might be many other influences -- such as
spin lock delays possibly synchronized with this core's cycle counter
-- that could get between the timer interrupt and the sample.

In the glorious tickless future, this should instead be wired up
to the timer interrupt handler, however that manifests in the
tickless API.


To generate a diff of this commit:
cvs rdiff -u -r1.143 -r1.144 src/sys/kern/kern_clock.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/kern/kern_clock.c
diff -u src/sys/kern/kern_clock.c:1.143 src/sys/kern/kern_clock.c:1.144
--- src/sys/kern/kern_clock.c:1.143	Sat Dec  5 18:17:01 2020
+++ src/sys/kern/kern_clock.c	Sat Jan 16 02:20:00 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_clock.c,v 1.143 2020/12/05 18:17:01 thorpej Exp $	*/
+/*	$NetBSD: kern_clock.c,v 1.144 2021/01/16 02:20:00 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2000, 2004, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -69,7 +69,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_clock.c,v 1.143 2020/12/05 18:17:01 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_clock.c,v 1.144 2021/01/16 02:20:00 riastradh Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_dtrace.h"
@@ -90,6 +90,7 @@ __KERNEL_RCSID(0, "$NetBSD: kern_clock.c
 #include <sys/timetc.h>
 #include <sys/cpu.h>
 #include <sys/atomic.h>
+#include <sys/rndsource.h>
 
 #ifdef GPROF
 #include <sys/gmon.h>
@@ -138,6 +139,61 @@ static int hardscheddiv; /* hard => sche
 static int psdiv;			/* prof => stat divider */
 int	psratio;			/* ratio: prof / stat */
 
+struct clockrnd {
+	struct krndsource source;
+	unsigned needed;
+};
+
+static struct clockrnd hardclockrnd __aligned(COHERENCY_UNIT);
+static struct clockrnd statclockrnd __aligned(COHERENCY_UNIT);
+
+static void
+clockrnd_get(size_t needed, void *cookie)
+{
+	struct clockrnd *C = cookie;
+
+	/* Start sampling.  */
+	atomic_store_relaxed(&C->needed, 2*NBBY*needed);
+}
+
+static void
+clockrnd_sample(struct clockrnd *C)
+{
+	struct cpu_info *ci = curcpu();
+
+	/* If there's nothing needed right now, stop here.  */
+	if (__predict_true(C->needed == 0))
+		return;
+
+	/*
+	 * If we're not the primary core of a package, we're probably
+	 * driven by the same clock as the primary core, so don't
+	 * bother.
+	 */
+	if (ci != ci->ci_package1st)
+		return;
+
+	/* Take a sample and enter it into the pool.  */
+	rnd_add_uint32(&C->source, 0);
+
+	/*
+	 * On the primary CPU, count down.  Using an atomic decrement
+	 * here isn't really necessary -- on every platform we care
+	 * about, stores to unsigned int are atomic, and the only other
+	 * memory operation that could happen here is for another CPU
+	 * to store a higher value for needed.  But using an atomic
+	 * decrement avoids giving the impression of data races, and is
+	 * unlikely to hurt because only one CPU will ever be writing
+	 * to the location.
+	 */
+	if (CPU_IS_PRIMARY(curcpu())) {
+		unsigned needed __diagused;
+
+		needed = atomic_dec_uint_nv(&C->needed);
+		KASSERT(needed != UINT_MAX);
+	}
+}
+
 static u_int get_intr_timecount(struct timecounter *);
 
 static struct timecounter intr_timecounter = {
@@ -224,6 +280,16 @@ initclocks(void)
 		       SYSCTL_DESCR("Number of hardclock ticks"),
 		       NULL, 0, &hardclock_ticks, sizeof(hardclock_ticks),
 		       CTL_KERN, KERN_HARDCLOCK_TICKS, CTL_EOL);
+
+	rndsource_setcb(&hardclockrnd.source, clockrnd_get, &hardclockrnd);
+	rnd_attach_source(&hardclockrnd.source, "hardclock", RND_TYPE_SKEW,
+	    RND_FLAG_COLLECT_TIME|RND_FLAG_HASCB);
+	if (stathz) {
+		rndsource_setcb(&statclockrnd.source, clockrnd_get,
+		    &statclockrnd);
+		rnd_attach_source(&statclockrnd.source, "statclock",
+		    RND_TYPE_SKEW, RND_FLAG_COLLECT_TIME|RND_FLAG_HASCB);
+	}
 }
 
 /*
@@ -235,6 +301,8 @@ hardclock(struct clockframe *frame)
 	struct lwp *l;
 	struct cpu_info *ci;
 
+	clockrnd_sample(&hardclockrnd);
+
 	ci = curcpu();
 	l = ci->ci_onproc;
 
@@ -338,6 +406,9 @@ statclock(struct clockframe *frame)
 	struct proc *p;
 	struct lwp *l;
 
+	if (stathz)
+		clockrnd_sample(&statclockrnd);
+
 	/*
 	 * Notice changes in divisor frequency, and adjust clock
 	 * frequency accordingly.
