Module Name:    src
Committed By:   christos
Date:           Tue Jan 22 22:09:44 UTC 2013

Modified Files:
        src/sys/arch/x86/include: lock.h

Log Message:
Allow for non-inlined definitions for RUMP


To generate a diff of this commit:
cvs rdiff -u -r1.26 -r1.27 src/sys/arch/x86/include/lock.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/x86/include/lock.h
diff -u src/sys/arch/x86/include/lock.h:1.26 src/sys/arch/x86/include/lock.h:1.27
--- src/sys/arch/x86/include/lock.h:1.26	Thu Oct 11 07:12:21 2012
+++ src/sys/arch/x86/include/lock.h	Tue Jan 22 17:09:44 2013
@@ -1,4 +1,4 @@
-/*	$NetBSD: lock.h,v 1.26 2012/10/11 11:12:21 apb Exp $	*/
+/*	$NetBSD: lock.h,v 1.27 2013/01/22 22:09:44 christos Exp $	*/
 
 /*-
  * Copyright (c) 2000, 2006 The NetBSD Foundation, Inc.
@@ -65,33 +65,25 @@ __cpu_simple_lock_clear(__cpu_simple_loc
 }
 
 #ifdef _HARDKERNEL
+# include <machine/cpufunc.h>
+# define SPINLOCK_SPIN_HOOK	/* nothing */
+# ifdef SPINLOCK_BACKOFF_HOOK
+#  undef SPINLOCK_BACKOFF_HOOK
+# endif
+# define SPINLOCK_BACKOFF_HOOK	x86_pause()
+# define SPINLOCK_INLINE
+#else /* !_HARDKERNEL */
+# define SPINLOCK_BODY
+# define SPINLOCK_INLINE static __inline __unused
+#endif /* _HARDKERNEL */
+
+SPINLOCK_INLINE void	__cpu_simple_lock_init(__cpu_simple_lock_t *);
+SPINLOCK_INLINE void	__cpu_simple_lock(__cpu_simple_lock_t *);
+SPINLOCK_INLINE int	__cpu_simple_lock_try(__cpu_simple_lock_t *);
+SPINLOCK_INLINE void	__cpu_simple_unlock(__cpu_simple_lock_t *);
 
-#include <machine/cpufunc.h>
-
-void	__cpu_simple_lock_init(__cpu_simple_lock_t *);
-void	__cpu_simple_lock(__cpu_simple_lock_t *);
-int	__cpu_simple_lock_try(__cpu_simple_lock_t *);
-void	__cpu_simple_unlock(__cpu_simple_lock_t *);
-
-#define	SPINLOCK_SPIN_HOOK	/* nothing */
-
-#ifdef SPINLOCK_BACKOFF_HOOK
-#undef SPINLOCK_BACKOFF_HOOK
-#endif
-#define	SPINLOCK_BACKOFF_HOOK	x86_pause()
-
-#else
-
-static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *)
-	__unused;
-static __inline void __cpu_simple_lock(__cpu_simple_lock_t *)
-	__unused;
-static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *)
-	__unused;
-static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *)
-	__unused;
-
-static __inline void
+#ifdef SPINLOCK_BODY
+SPINLOCK_INLINE void
 __cpu_simple_lock_init(__cpu_simple_lock_t *lockp)
 {
 
@@ -99,7 +91,7 @@ __cpu_simple_lock_init(__cpu_simple_lock
 	__insn_barrier();
 }
 
-static __inline int
+SPINLOCK_INLINE int
 __cpu_simple_lock_try(__cpu_simple_lock_t *lockp)
 {
 	uint8_t val;
@@ -112,7 +104,7 @@ __cpu_simple_lock_try(__cpu_simple_lock_
 	return val == __SIMPLELOCK_UNLOCKED;
 }
 
-static __inline void
+SPINLOCK_INLINE void
 __cpu_simple_lock(__cpu_simple_lock_t *lockp)
 {
 
@@ -173,7 +165,7 @@ __cpu_simple_lock(__cpu_simple_lock_t *l
  *		reordered, however stores act as load fences, meaning that
  *		loads can not be reordered around stores.
  */
-static __inline void
+SPINLOCK_INLINE void
 __cpu_simple_unlock(__cpu_simple_lock_t *lockp)
 {
 
@@ -181,6 +173,6 @@ __cpu_simple_unlock(__cpu_simple_lock_t 
 	*lockp = __SIMPLELOCK_UNLOCKED;
 }
 
-#endif	/* _HARDKERNEL */
+#endif	/* SPINLOCK_BODY */
 
 #endif /* _X86_LOCK_H_ */

Reply via email to