Module Name:    src
Committed By:   ad
Date:           Sat Jun 13 23:58:52 UTC 2020

Modified Files:
        src/sys/arch/amd64/amd64: cpufunc.S
        src/sys/arch/amd64/include: proc.h
        src/sys/arch/i386/i386: cpufunc.S
        src/sys/arch/i386/include: proc.h
        src/sys/arch/x86/x86: tsc.c

Log Message:
Print a rate-limited warning if the TSC timecounter goes backwards from the
viewpoint of any single LWP.


To generate a diff of this commit:
cvs rdiff -u -r1.59 -r1.60 src/sys/arch/amd64/amd64/cpufunc.S
cvs rdiff -u -r1.24 -r1.25 src/sys/arch/amd64/include/proc.h
cvs rdiff -u -r1.45 -r1.46 src/sys/arch/i386/i386/cpufunc.S
cvs rdiff -u -r1.47 -r1.48 src/sys/arch/i386/include/proc.h
cvs rdiff -u -r1.48 -r1.49 src/sys/arch/x86/x86/tsc.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/amd64/amd64/cpufunc.S
diff -u src/sys/arch/amd64/amd64/cpufunc.S:1.59 src/sys/arch/amd64/amd64/cpufunc.S:1.60
--- src/sys/arch/amd64/amd64/cpufunc.S:1.59	Mon Jun  1 22:58:06 2020
+++ src/sys/arch/amd64/amd64/cpufunc.S	Sat Jun 13 23:58:51 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpufunc.S,v 1.59 2020/06/01 22:58:06 ad Exp $	*/
+/*	$NetBSD: cpufunc.S,v 1.60 2020/06/13 23:58:51 ad Exp $	*/
 
 /*
  * Copyright (c) 1998, 2007, 2008, 2020 The NetBSD Foundation, Inc.
@@ -206,8 +206,10 @@ ENTRY(x86_hotpatch)
 END(x86_hotpatch)
 #endif /* !XENPV */
 
-/* Could be exact same as cpu_counter, but KMSAN needs to have the correct
- * size of the return value. */
+/*
+ * Could be exact same as cpu_counter, but KMSAN needs to have the correct
+ * size of the return value.
+ */
 ENTRY(cpu_counter32)
 	movq	CPUVAR(CURLWP), %rcx
 1:
@@ -221,7 +223,6 @@ ENTRY(cpu_counter32)
 2:
 	jmp	1b
 END(cpu_counter32)
-STRONG_ALIAS(tsc_get_timecount, cpu_counter32)
 
 ENTRY(cpu_counter)
 	movq	CPUVAR(CURLWP), %rcx

Index: src/sys/arch/amd64/include/proc.h
diff -u src/sys/arch/amd64/include/proc.h:1.24 src/sys/arch/amd64/include/proc.h:1.25
--- src/sys/arch/amd64/include/proc.h:1.24	Mon Jan 13 00:26:52 2020
+++ src/sys/arch/amd64/include/proc.h	Sat Jun 13 23:58:51 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: proc.h,v 1.24 2020/01/13 00:26:52 ad Exp $	*/
+/*	$NetBSD: proc.h,v 1.25 2020/06/13 23:58:51 ad Exp $	*/
 
 /*
  * Copyright (c) 1991 Regents of the University of California.
@@ -46,6 +46,7 @@ struct pmap;
 struct vm_page;
 
 struct mdlwp {
+	volatile uint64_t md_tsc;	/* last TSC reading */
 	struct	trapframe *md_regs;	/* registers on current frame */
 	int	md_flags;		/* machine-dependent flags */
 	volatile int md_astpending;

Index: src/sys/arch/i386/i386/cpufunc.S
diff -u src/sys/arch/i386/i386/cpufunc.S:1.45 src/sys/arch/i386/i386/cpufunc.S:1.46
--- src/sys/arch/i386/i386/cpufunc.S:1.45	Thu May 28 20:03:19 2020
+++ src/sys/arch/i386/i386/cpufunc.S	Sat Jun 13 23:58:52 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpufunc.S,v 1.45 2020/05/28 20:03:19 ad Exp $	*/
+/*	$NetBSD: cpufunc.S,v 1.46 2020/06/13 23:58:52 ad Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2007, 2020 The NetBSD Foundation, Inc.
@@ -38,7 +38,7 @@
 #include <sys/errno.h>
 
 #include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.45 2020/05/28 20:03:19 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.46 2020/06/13 23:58:52 ad Exp $");
 
 #include "opt_xen.h"
 
@@ -163,7 +163,7 @@ ENTRY(msr_onfault)
 	ret
 END(msr_onfault)
 
-ENTRY(tsc_get_timecount)
+ENTRY(cpu_counter)
 	pushl	%ebx
 	movl	CPUVAR(CURLWP), %ecx
 1:
@@ -177,10 +177,9 @@ ENTRY(tsc_get_timecount)
 	ret
 2:
 	jmp	1b
-END(tsc_get_timecount)
+END(cpu_counter)
 
-STRONG_ALIAS(cpu_counter, tsc_get_timecount)
-STRONG_ALIAS(cpu_counter32, tsc_get_timecount)
+STRONG_ALIAS(cpu_counter32, cpu_counter)
 
 ENTRY(breakpoint)
 	pushl	%ebp

Index: src/sys/arch/i386/include/proc.h
diff -u src/sys/arch/i386/include/proc.h:1.47 src/sys/arch/i386/include/proc.h:1.48
--- src/sys/arch/i386/include/proc.h:1.47	Mon Jan 13 00:26:52 2020
+++ src/sys/arch/i386/include/proc.h	Sat Jun 13 23:58:52 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: proc.h,v 1.47 2020/01/13 00:26:52 ad Exp $	*/
+/*	$NetBSD: proc.h,v 1.48 2020/06/13 23:58:52 ad Exp $	*/
 
 /*
  * Copyright (c) 1991 Regents of the University of California.
@@ -46,6 +46,7 @@ struct vm_page;
 #define	MDL_FPU_IN_CPU		0x0020	/* the FPU state is in the CPU */
 
 struct mdlwp {
+	volatile uint64_t md_tsc;	/* last TSC reading */
 	struct	trapframe *md_regs;	/* registers on current frame */
 	int	md_flags;		/* machine-dependent flags */
 	volatile int md_astpending;	/* AST pending for this process */

Index: src/sys/arch/x86/x86/tsc.c
diff -u src/sys/arch/x86/x86/tsc.c:1.48 src/sys/arch/x86/x86/tsc.c:1.49
--- src/sys/arch/x86/x86/tsc.c:1.48	Wed May 27 18:46:15 2020
+++ src/sys/arch/x86/x86/tsc.c	Sat Jun 13 23:58:52 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: tsc.c,v 1.48 2020/05/27 18:46:15 ad Exp $	*/
+/*	$NetBSD: tsc.c,v 1.49 2020/06/13 23:58:52 ad Exp $	*/
 
 /*-
  * Copyright (c) 2008, 2020 The NetBSD Foundation, Inc.
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: tsc.c,v 1.48 2020/05/27 18:46:15 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: tsc.c,v 1.49 2020/06/13 23:58:52 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -38,6 +38,7 @@ __KERNEL_RCSID(0, "$NetBSD: tsc.c,v 1.48
 #include <sys/kernel.h>
 #include <sys/cpu.h>
 #include <sys/xcall.h>
+#include <sys/lock.h>
 
 #include <machine/cpu_counter.h>
 #include <machine/cpuvar.h>
@@ -50,7 +51,7 @@ __KERNEL_RCSID(0, "$NetBSD: tsc.c,v 1.48
 #define	TSC_SYNC_ROUNDS		1000
 #define	ABS(a)			((a) >= 0 ? (a) : -(a))
 
-u_int	tsc_get_timecount(struct timecounter *);
+static u_int	tsc_get_timecount(struct timecounter *);
 
 static void	tsc_delay(unsigned int);
 
@@ -352,3 +353,36 @@ tsc_delay(unsigned int us)
 		x86_pause();
 	}
 }
+
+static u_int
+tsc_get_timecount(struct timecounter *tc)
+{
+	static __cpu_simple_lock_t lock = __SIMPLELOCK_UNLOCKED;
+	static int lastwarn;
+	uint64_t cur, prev;
+	lwp_t *l = curlwp;
+	int ticks;
+
+	/*
+	 * Previous value must be read before the counter and stored to
+	 * after, because this routine can be called from interrupt context
+	 * and may run over the top of an existing invocation.  Ordering is
+	 * guaranteed by "volatile" on md_tsc.
+	 */
+	prev = l->l_md.md_tsc;
+	cur = cpu_counter();
+	if (__predict_false(cur < prev)) {
+		if ((cur >> 63) == (prev >> 63) &&
+		    __cpu_simple_lock_try(&lock)) {
+			ticks = getticks();
+			if (ticks - lastwarn >= hz) {
+				printf("WARNING: TSC time went backwards "
+				    "by %u\n", (unsigned)(prev - cur));
+				lastwarn = ticks;
+			}
+			__cpu_simple_unlock(&lock);
+		}
+	}
+	l->l_md.md_tsc = cur;
+	return (uint32_t)cur;
+}

Reply via email to