The generic vDSO library expects the architecture glue for hardware
counter reading to live in asm/vdso/gettimeofday.h. To prepare for the
adoption of the generic library, move the existing functions there.

While at it, perform some trivial alignment with the generic vDSO library:
* Drop 'notrace', as the functions are __always_inline anyway
* Use the same parameter types
* Use the same function names

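For reference, the hot path on the consumer side then boils down to the
following (illustrative sketch only, mirroring the vgetsns()/do_realtime()
hunks below):

	/* read the counter through the new architecture hook (tick or stick) */
	u64 cycles = __arch_get_hw_counter(vvar);
	/* convert the cycle delta since the last update to shifted nanoseconds */
	u64 ns = ((cycles - vvar->clock.cycle_last) & vvar->clock.mask) *
		 vvar->clock.mult;
	/* drop the clocksource shift with the 32-/64-bit aware helper */
	ns = vdso_shift_ns(ns, vvar->clock.shift);
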
Signed-off-by: Thomas Weißschuh <thomas.weisssc...@linutronix.de>
---
 arch/sparc/include/asm/vdso/gettimeofday.h | 78 ++++++++++++++++++++++++++++++
 arch/sparc/vdso/vclock_gettime.c           | 70 ++-------------------------
 2 files changed, 82 insertions(+), 66 deletions(-)
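
Note for reviewers: in the 32-bit variant the %H/%L operand modifiers name
the high and low words of the 64-bit register pair, so the vdso_shift_ns()
asm moved below is roughly equivalent to the following C (helper name is
illustrative only):

	static __always_inline u64 vdso_shift_ns_sketch(u64 val, u32 amt)
	{
		u32 hi = val >> 32;		/* %H1 */
		u32 lo = val;			/* %L1, zero-extended by "srl %L1, 0" */
		u64 full = ((u64)hi << 32) | lo;	/* reassembled in %g1 */

		return full >> amt;		/* result split back into %H0/%L0 */
	}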

diff --git a/arch/sparc/include/asm/vdso/gettimeofday.h b/arch/sparc/include/asm/vdso/gettimeofday.h
new file mode 100644
index 0000000000000000000000000000000000000000..31f6505d3ab5dde9e02eca6da9182e5fb91031c4
--- /dev/null
+++ b/arch/sparc/include/asm/vdso/gettimeofday.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2006 Andi Kleen, SUSE Labs.
+ */
+
+#ifndef _ASM_SPARC_VDSO_GETTIMEOFDAY_H
+#define _ASM_SPARC_VDSO_GETTIMEOFDAY_H
+
+#include <linux/types.h>
+#include <asm/vvar.h>
+
+#ifdef CONFIG_SPARC64
+static __always_inline u64 vdso_shift_ns(u64 val, u32 amt)
+{
+       return val >> amt;
+}
+
+static __always_inline u64 vread_tick(void)
+{
+       u64     ret;
+
+       __asm__ __volatile__("rd %%tick, %0" : "=r" (ret));
+       return ret;
+}
+
+static __always_inline u64 vread_tick_stick(void)
+{
+       u64     ret;
+
+       __asm__ __volatile__("rd %%asr24, %0" : "=r" (ret));
+       return ret;
+}
+#else
+static __always_inline u64 vdso_shift_ns(u64 val, u32 amt)
+{
+       u64 ret;
+
+       __asm__ __volatile__("sllx %H1, 32, %%g1\n\t"
+                            "srl %L1, 0, %L1\n\t"
+                            "or %%g1, %L1, %%g1\n\t"
+                            "srlx %%g1, %2, %L0\n\t"
+                            "srlx %L0, 32, %H0"
+                            : "=r" (ret)
+                            : "r" (val), "r" (amt)
+                            : "g1");
+       return ret;
+}
+
+static __always_inline u64 vread_tick(void)
+{
+       register unsigned long long ret asm("o4");
+
+       __asm__ __volatile__("rd %%tick, %L0\n\t"
+                            "srlx %L0, 32, %H0"
+                            : "=r" (ret));
+       return ret;
+}
+
+static __always_inline u64 vread_tick_stick(void)
+{
+       register unsigned long long ret asm("o4");
+
+       __asm__ __volatile__("rd %%asr24, %L0\n\t"
+                            "srlx %L0, 32, %H0"
+                            : "=r" (ret));
+       return ret;
+}
+#endif
+
+static __always_inline u64 __arch_get_hw_counter(struct vvar_data *vvar)
+{
+       if (likely(vvar->vclock_mode == VCLOCK_STICK))
+               return vread_tick_stick();
+       else
+               return vread_tick();
+}
+
+#endif /* _ASM_SPARC_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/sparc/vdso/vclock_gettime.c b/arch/sparc/vdso/vclock_gettime.c
index 643608bffe13d904c5f77edd585b2e58277491fb..16ac80982a00b9f965453b89a0cc111312baa9b2 100644
--- a/arch/sparc/vdso/vclock_gettime.c
+++ b/arch/sparc/vdso/vclock_gettime.c
@@ -19,6 +19,7 @@
 #include <asm/unistd.h>
 #include <asm/timex.h>
 #include <asm/clocksource.h>
+#include <asm/vdso/gettimeofday.h>
 #include <asm/vvar.h>
 
 #ifdef CONFIG_SPARC64
@@ -85,73 +86,10 @@ notrace static long vdso_fallback_gettimeofday(struct __kernel_old_timeval *tv,
        return o0;
 }
 
-#ifdef CONFIG_SPARC64
-notrace static __always_inline u64 __shr64(u64 val, int amt)
-{
-       return val >> amt;
-}
-
-notrace static __always_inline u64 vread_tick(void)
-{
-       u64     ret;
-
-       __asm__ __volatile__("rd %%tick, %0" : "=r" (ret));
-       return ret;
-}
-
-notrace static __always_inline u64 vread_tick_stick(void)
-{
-       u64     ret;
-
-       __asm__ __volatile__("rd %%asr24, %0" : "=r" (ret));
-       return ret;
-}
-#else
-notrace static __always_inline u64 __shr64(u64 val, int amt)
-{
-       u64 ret;
-
-       __asm__ __volatile__("sllx %H1, 32, %%g1\n\t"
-                            "srl %L1, 0, %L1\n\t"
-                            "or %%g1, %L1, %%g1\n\t"
-                            "srlx %%g1, %2, %L0\n\t"
-                            "srlx %L0, 32, %H0"
-                            : "=r" (ret)
-                            : "r" (val), "r" (amt)
-                            : "g1");
-       return ret;
-}
-
-notrace static __always_inline u64 vread_tick(void)
-{
-       register unsigned long long ret asm("o4");
-
-       __asm__ __volatile__("rd %%tick, %L0\n\t"
-                            "srlx %L0, 32, %H0"
-                            : "=r" (ret));
-       return ret;
-}
-
-notrace static __always_inline u64 vread_tick_stick(void)
-{
-       register unsigned long long ret asm("o4");
-
-       __asm__ __volatile__("rd %%asr24, %L0\n\t"
-                            "srlx %L0, 32, %H0"
-                            : "=r" (ret));
-       return ret;
-}
-#endif
-
 notrace static __always_inline u64 vgetsns(struct vvar_data *vvar)
 {
        u64 v;
-       u64 cycles;
-
-       if (likely(vvar->vclock_mode == VCLOCK_STICK))
-               cycles = vread_tick_stick();
-       else
-               cycles = vread_tick();
+       u64 cycles = __arch_get_hw_counter(vvar);
 
        v = (cycles - vvar->clock.cycle_last) & vvar->clock.mask;
        return v * vvar->clock.mult;
@@ -168,7 +106,7 @@ notrace static __always_inline int do_realtime(struct vvar_data *vvar,
                ts->tv_sec = vvar->wall_time_sec;
                ns = vvar->wall_time_snsec;
                ns += vgetsns(vvar);
-               ns = __shr64(ns, vvar->clock.shift);
+               ns = vdso_shift_ns(ns, vvar->clock.shift);
        } while (unlikely(vvar_read_retry(vvar, seq)));
 
        ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
@@ -188,7 +126,7 @@ notrace static __always_inline int do_monotonic(struct vvar_data *vvar,
                ts->tv_sec = vvar->monotonic_time_sec;
                ns = vvar->monotonic_time_snsec;
                ns += vgetsns(vvar);
-               ns = __shr64(ns, vvar->clock.shift);
+               ns = vdso_shift_ns(ns, vvar->clock.shift);
        } while (unlikely(vvar_read_retry(vvar, seq)));
 
        ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);

-- 
2.51.0

