Module Name:    src
Committed By:   maxv
Date:           Wed Jul  3 17:24:37 UTC 2019

Modified Files:
        src/sys/arch/amd64/amd64: cpufunc.S
        src/sys/arch/i386/i386: cpufunc.S
        src/sys/arch/x86/include: cpufunc.h

Log Message:
Inline x86_cpuid2(), a prerequisite for future changes. Also, add the
"memory" clobber to certain other inlines, to make sure GCC does not
reorder memory accesses around them.
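
For reference, a minimal user-space sketch of how the new inline is meant
to be used. The x86_cpuid2()/x86_cpuid() definitions mirror the cpufunc.h
change below; the standalone wrapper, main() and the vendor-string decoding
are purely illustrative and not part of this commit:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static inline void
	x86_cpuid2(uint32_t eax, uint32_t ecx, uint32_t *regs)
	{
		uint32_t ebx, edx;

		__asm volatile (
			"cpuid"
			: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
			: "a" (eax), "c" (ecx)
		);

		regs[0] = eax;
		regs[1] = ebx;
		regs[2] = ecx;
		regs[3] = edx;
	}
	#define x86_cpuid(a,b)	x86_cpuid2((a), 0, (b))

	int
	main(void)
	{
		uint32_t regs[4];
		char vendor[13];

		/* Leaf 0: EAX = highest basic leaf, EBX/EDX/ECX = vendor string. */
		x86_cpuid(0, regs);
		memcpy(vendor + 0, &regs[1], 4);	/* EBX */
		memcpy(vendor + 4, &regs[3], 4);	/* EDX */
		memcpy(vendor + 8, &regs[2], 4);	/* ECX */
		vendor[12] = '\0';

		printf("max basic leaf %u, vendor %s\n", regs[0], vendor);
		return 0;
	}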


To generate a diff of this commit:
cvs rdiff -u -r1.41 -r1.42 src/sys/arch/amd64/amd64/cpufunc.S
cvs rdiff -u -r1.32 -r1.33 src/sys/arch/i386/i386/cpufunc.S
cvs rdiff -u -r1.32 -r1.33 src/sys/arch/x86/include/cpufunc.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/amd64/amd64/cpufunc.S
diff -u src/sys/arch/amd64/amd64/cpufunc.S:1.41 src/sys/arch/amd64/amd64/cpufunc.S:1.42
--- src/sys/arch/amd64/amd64/cpufunc.S:1.41	Wed May 29 16:54:41 2019
+++ src/sys/arch/amd64/amd64/cpufunc.S	Wed Jul  3 17:24:37 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpufunc.S,v 1.41 2019/05/29 16:54:41 maxv Exp $	*/
+/*	$NetBSD: cpufunc.S,v 1.42 2019/07/03 17:24:37 maxv Exp $	*/
 
 /*
  * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
@@ -358,20 +358,6 @@ ENTRY(x86_mwait)
 	ret
 END(x86_mwait)
 
-ENTRY(x86_cpuid2)
-	movq	%rbx, %r8
-	movq	%rdi, %rax
-	movq	%rsi, %rcx
-	movq	%rdx, %rsi
-	cpuid
-	movl	%eax, 0(%rsi)
-	movl	%ebx, 4(%rsi)
-	movl	%ecx, 8(%rsi)
-	movl	%edx, 12(%rsi)
-	movq	%r8, %rbx
-	ret
-END(x86_cpuid2)
-
 ENTRY(fnsave)
 	fnsave	(%rdi)
 	ret

Index: src/sys/arch/i386/i386/cpufunc.S
diff -u src/sys/arch/i386/i386/cpufunc.S:1.32 src/sys/arch/i386/i386/cpufunc.S:1.33
--- src/sys/arch/i386/i386/cpufunc.S:1.32	Sun May 19 08:17:02 2019
+++ src/sys/arch/i386/i386/cpufunc.S	Wed Jul  3 17:24:37 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpufunc.S,v 1.32 2019/05/19 08:17:02 maxv Exp $	*/
+/*	$NetBSD: cpufunc.S,v 1.33 2019/07/03 17:24:37 maxv Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2007 The NetBSD Foundation, Inc.
@@ -38,7 +38,7 @@
 #include <sys/errno.h>
 
 #include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.32 2019/05/19 08:17:02 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.33 2019/07/03 17:24:37 maxv Exp $");
 
 #include "opt_xen.h"
 
@@ -244,22 +244,6 @@ ENTRY(x86_mwait)  
 	ret
 END(x86_mwait)  
 
-ENTRY(x86_cpuid2)
-	pushl	%ebx
-	pushl	%edi
-	movl	12(%esp), %eax
-	movl	16(%esp), %ecx
-	movl	20(%esp), %edi
-	cpuid
-	movl	%eax, 0(%edi)
-	movl	%ebx, 4(%edi)
-	movl	%ecx, 8(%edi)
-	movl	%edx, 12(%edi)
-	popl	%edi
-	popl	%ebx
-	ret
-END(x86_cpuid2)
-
 ENTRY(fnsave)
 	movl	4(%esp), %eax
 	fnsave	(%eax)

Index: src/sys/arch/x86/include/cpufunc.h
diff -u src/sys/arch/x86/include/cpufunc.h:1.32 src/sys/arch/x86/include/cpufunc.h:1.33
--- src/sys/arch/x86/include/cpufunc.h:1.32	Thu May 30 21:40:40 2019
+++ src/sys/arch/x86/include/cpufunc.h	Wed Jul  3 17:24:37 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpufunc.h,v 1.32 2019/05/30 21:40:40 christos Exp $	*/
+/*	$NetBSD: cpufunc.h,v 1.33 2019/07/03 17:24:37 maxv Exp $	*/
 
 /*
  * Copyright (c) 1998, 2007, 2019 The NetBSD Foundation, Inc.
@@ -112,9 +112,24 @@ void	x86_patch(bool);
 
 void	x86_monitor(const void *, uint32_t, uint32_t);
 void	x86_mwait(uint32_t, uint32_t);
-/* x86_cpuid2() writes four 32bit values, %eax, %ebx, %ecx and %edx */
-#define	x86_cpuid(a,b)	x86_cpuid2((a),0,(b))
-void	x86_cpuid2(uint32_t, uint32_t, uint32_t *);
+
+static inline void
+x86_cpuid2(uint32_t eax, uint32_t ecx, uint32_t *regs)
+{
+	uint32_t ebx, edx;
+
+	__asm volatile (
+		"cpuid"
+		: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+		: "a" (eax), "c" (ecx)
+	);
+
+	regs[0] = eax;
+	regs[1] = ebx;
+	regs[2] = ecx;
+	regs[3] = edx;
+}
+#define x86_cpuid(a,b)	x86_cpuid2((a), 0, (b))
 
 /* -------------------------------------------------------------------------- */
 
@@ -176,6 +191,7 @@ void	setusergs(int);
 			"mov	%[val],%%cr" #crnum	\
 			:				\
 			: [val] "r" (val)		\
+			: "memory"			\
 		);					\
 	}						\
 	static inline register_t rcr##crnum(void)	\
@@ -325,13 +341,13 @@ void x86_enable_intr(void);
 static inline void
 x86_disable_intr(void)
 {
-	__asm volatile ("cli");
+	__asm volatile ("cli" ::: "memory");
 }
 
 static inline void
 x86_enable_intr(void)
 {
-	__asm volatile ("sti");
+	__asm volatile ("sti" ::: "memory");
 }
 #endif /* XENPV */
 

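For context on the new "memory" clobbers: they act as a compiler-level
barrier, so GCC may not move loads or stores across the cli/sti (or the
%cr writes). A rough, hypothetical sketch of the intent; pending_work and
do_critical_update() are made up for illustration:

	static int pending_work;	/* hypothetical shared state */

	static inline void
	x86_disable_intr(void)
	{
		/*
		 * The "memory" clobber tells GCC the asm may read or write
		 * arbitrary memory, so the store below cannot be hoisted
		 * above the cli.
		 */
		__asm volatile ("cli" ::: "memory");
	}

	static inline void
	x86_enable_intr(void)
	{
		/* Likewise, the store cannot be sunk below the sti. */
		__asm volatile ("sti" ::: "memory");
	}

	void
	do_critical_update(void)
	{
		x86_disable_intr();
		pending_work++;		/* runs with interrupts disabled */
		x86_enable_intr();
	}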