The generic vDSO library expects a vdso/processor.h header with a
definition of cpu_relax().

Split out cpu_relax() into this dedicated header.
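
For context, the generic layer wraps the per-architecture header and
uses cpu_relax() in its seqcount retry loops, roughly as sketched below
(based on include/vdso/processor.h and include/vdso/helpers.h; exact
struct and variable names vary between kernel versions):

	/* include/vdso/processor.h (generic wrapper, simplified) */
	#ifndef __VDSO_PROCESSOR_H
	#define __VDSO_PROCESSOR_H

	#ifndef __ASSEMBLY__
	#include <asm/vdso/processor.h>	/* arch must provide cpu_relax() */
	#endif

	#endif /* __VDSO_PROCESSOR_H */

	/*
	 * Consumers such as vdso_read_begin() in include/vdso/helpers.h
	 * then spin on the seqcount with it ("vd" is illustrative):
	 *
	 *	while (unlikely((seq = READ_ONCE(vd->seq)) & 1))
	 *		cpu_relax();
	 */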

Signed-off-by: Thomas Weißschuh <thomas.weisssc...@linutronix.de>
---
 arch/sparc/include/asm/processor.h      |  3 +++
 arch/sparc/include/asm/processor_32.h   |  2 --
 arch/sparc/include/asm/processor_64.h   | 25 --------------------
 arch/sparc/include/asm/vdso/processor.h | 41 +++++++++++++++++++++++++++++++++
 4 files changed, 44 insertions(+), 27 deletions(-)

diff --git a/arch/sparc/include/asm/processor.h b/arch/sparc/include/asm/processor.h
index 18295ea625dd7271617c15caa003a173099dd4d0..e34de956519aaca0e9bf82a22000d9096f868968 100644
--- a/arch/sparc/include/asm/processor.h
+++ b/arch/sparc/include/asm/processor.h
@@ -1,6 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef ___ASM_SPARC_PROCESSOR_H
 #define ___ASM_SPARC_PROCESSOR_H
+
+#include <asm/vdso/processor.h>
+
 #if defined(__sparc__) && defined(__arch64__)
 #include <asm/processor_64.h>
 #else
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index ba8b70ffec085feb17de9050f37de98e0039f7c3..a074d313f4f80621c1bc42733529c6d9450b1275 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -91,8 +91,6 @@ unsigned long __get_wchan(struct task_struct *);
 extern struct task_struct *last_task_used_math;
 int do_mathemu(struct pt_regs *regs, struct task_struct *fpt);
 
-#define cpu_relax()    barrier()
-
 extern void (*sparc_idle)(void);
 
 #endif
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index 0a0d5c3d184c751d232a00e73357c0e345695a94..3de65ad6d78592013b1d1158a58497f3821d003c 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -182,31 +182,6 @@ unsigned long __get_wchan(struct task_struct *task);
 #define KSTK_EIP(tsk)  (task_pt_regs(tsk)->tpc)
 #define KSTK_ESP(tsk)  (task_pt_regs(tsk)->u_regs[UREG_FP])
 
-/* Please see the commentary in asm/backoff.h for a description of
- * what these instructions are doing and how they have been chosen.
- * To make a long story short, we are trying to yield the current cpu
- * strand during busy loops.
- */
-#ifdef BUILD_VDSO
-#define        cpu_relax()     asm volatile("\n99:\n\t"                        \
-                                    "rd        %%ccr, %%g0\n\t"        \
-                                    "rd        %%ccr, %%g0\n\t"        \
-                                    "rd        %%ccr, %%g0\n\t"        \
-                                    ::: "memory")
-#else /* ! BUILD_VDSO */
-#define cpu_relax()    asm volatile("\n99:\n\t"                        \
-                                    "rd        %%ccr, %%g0\n\t"        \
-                                    "rd        %%ccr, %%g0\n\t"        \
-                                    "rd        %%ccr, %%g0\n\t"        \
-                                    ".section  .pause_3insn_patch,\"ax\"\n\t"\
-                                    ".word     99b\n\t"                \
-                                    "wr        %%g0, 128, %%asr27\n\t" \
-                                    "nop\n\t"                          \
-                                    "nop\n\t"                          \
-                                    ".previous"                        \
-                                    ::: "memory")
-#endif
-
 /* Prefetch support.  This is tuned for UltraSPARC-III and later.
  * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
  * a shallower prefetch queue than later chips.
diff --git a/arch/sparc/include/asm/vdso/processor.h b/arch/sparc/include/asm/vdso/processor.h
new file mode 100644
index 0000000000000000000000000000000000000000..f7a9adc807f7c9a0444afa51aeb47649a9bdb079
--- /dev/null
+++ b/arch/sparc/include/asm/vdso/processor.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_SPARC_VDSO_PROCESSOR_H
+#define _ASM_SPARC_VDSO_PROCESSOR_H
+
+#include <linux/compiler.h>
+
+#if defined(__arch64__)
+
+/* Please see the commentary in asm/backoff.h for a description of
+ * what these instructions are doing and how they have been chosen.
+ * To make a long story short, we are trying to yield the current cpu
+ * strand during busy loops.
+ */
+#ifdef BUILD_VDSO
+#define        cpu_relax()     asm volatile("\n99:\n\t"                        \
+                                    "rd        %%ccr, %%g0\n\t"        \
+                                    "rd        %%ccr, %%g0\n\t"        \
+                                    "rd        %%ccr, %%g0\n\t"        \
+                                    ::: "memory")
+#else /* ! BUILD_VDSO */
+#define cpu_relax()    asm volatile("\n99:\n\t"                        \
+                                    "rd        %%ccr, %%g0\n\t"        \
+                                    "rd        %%ccr, %%g0\n\t"        \
+                                    "rd        %%ccr, %%g0\n\t"        \
+                                    ".section  .pause_3insn_patch,\"ax\"\n\t"\
+                                    ".word     99b\n\t"                \
+                                    "wr        %%g0, 128, %%asr27\n\t" \
+                                    "nop\n\t"                          \
+                                    "nop\n\t"                          \
+                                    ".previous"                        \
+                                    ::: "memory")
+#endif /* BUILD_VDSO */
+
+#else /* ! __arch64__ */
+
+#define cpu_relax()    barrier()
+
+#endif /* __arch64__ */
+
+#endif /* _ASM_SPARC_VDSO_PROCESSOR_H */

-- 
2.50.1
