Commit-ID:  44f57d788e7deecb504843534081d3449c2eede9
Gitweb:     https://git.kernel.org/tip/44f57d788e7deecb504843534081d3449c2eede9
Author:     Vincenzo Frascino <vincenzo.frasc...@arm.com>
AuthorDate: Fri, 21 Jun 2019 10:52:30 +0100
Committer:  Thomas Gleixner <t...@linutronix.de>
CommitDate: Sat, 22 Jun 2019 21:21:06 +0200

timekeeping: Provide a generic update_vsyscall() implementation

The new generic VDSO library makes it possible to unify the
update_vsyscall[_tz]() implementations.

Provide a generic implementation based on the x86 code, along with the
bindings which need to be implemented in architecture-specific code.

[ tglx: Moved it into kernel/time where it belongs. Removed the pointless
        line breaks in the stub functions. Massaged changelog ]
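
The bindings are the __arch_*() hooks provided with #ifndef fallbacks in
the new asm-generic/vdso/vsyscall.h below. As a rough illustration only
(hypothetical architecture "foo"; the vdso_data pointer, the helper and
the clock mode constants are assumed and not part of this patch), an
architecture override could look like this:

/* Hypothetical arch/foo/include/asm/vdso/vsyscall.h */
#ifndef __FOO_ASM_VDSO_VSYSCALL_H
#define __FOO_ASM_VDSO_VSYSCALL_H

#ifndef __ASSEMBLY__

#include <linux/timekeeper_internal.h>
#include <vdso/datapage.h>

/* Assumed: the architecture maps this page into the vDSO. */
extern struct vdso_data *vdso_data;

static __always_inline struct vdso_data *__arch_get_k_vdso_data(void)
{
        return vdso_data;
}
#define __arch_get_k_vdso_data __arch_get_k_vdso_data

static __always_inline int __arch_get_clock_mode(struct timekeeper *tk)
{
        /* Assumed helper/constants: is the current clocksource vDSO capable? */
        return foo_clocksource_vdso_capable(tk->tkr_mono.clock) ?
               FOO_CLOCK_MODE_COUNTER : FOO_CLOCK_MODE_NONE;
}
#define __arch_get_clock_mode __arch_get_clock_mode

/* Hooks which are not overridden fall back to the generic stubs. */
#include <asm-generic/vdso/vsyscall.h>

#endif /* !__ASSEMBLY__ */
#endif /* __FOO_ASM_VDSO_VSYSCALL_H */

Defining a macro with the same name as each override suppresses the
corresponding generic fallback when <asm-generic/vdso/vsyscall.h> is
included at the end of the architecture header.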

Signed-off-by: Vincenzo Frascino <vincenzo.frasc...@arm.com>
Signed-off-by: Thomas Gleixner <t...@linutronix.de>
Tested-by: Shijith Thotton <sthot...@marvell.com>
Tested-by: Andre Przywara <andre.przyw...@arm.com>
Cc: linux-a...@vger.kernel.org
Cc: linux-arm-ker...@lists.infradead.org
Cc: linux-m...@vger.kernel.org
Cc: linux-kselft...@vger.kernel.org
Cc: Catalin Marinas <catalin.mari...@arm.com>
Cc: Will Deacon <will.dea...@arm.com>
Cc: Arnd Bergmann <a...@arndb.de>
Cc: Russell King <li...@armlinux.org.uk>
Cc: Ralf Baechle <r...@linux-mips.org>
Cc: Paul Burton <paul.bur...@mips.com>
Cc: Daniel Lezcano <daniel.lezc...@linaro.org>
Cc: Mark Salyzyn <saly...@android.com>
Cc: Peter Collingbourne <p...@google.com>
Cc: Shuah Khan <sh...@kernel.org>
Cc: Dmitry Safonov <0x7f454...@gmail.com>
Cc: Rasmus Villemoes <li...@rasmusvillemoes.dk>
Cc: Huw Davies <h...@codeweavers.com>
Link: https://lkml.kernel.org/r/20190621095252.32307-4-vincenzo.frasc...@arm.com

---
 include/asm-generic/vdso/vsyscall.h |  50 ++++++++++++++
 include/vdso/vsyscall.h             |  11 +++
 kernel/time/Makefile                |   1 +
 kernel/time/vsyscall.c              | 133 ++++++++++++++++++++++++++++++++++++
 4 files changed, 195 insertions(+)

diff --git a/include/asm-generic/vdso/vsyscall.h b/include/asm-generic/vdso/vsyscall.h
new file mode 100644
index 000000000000..e94b19782c92
--- /dev/null
+++ b/include/asm-generic/vdso/vsyscall.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_VSYSCALL_H
+#define __ASM_GENERIC_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#ifndef __arch_get_k_vdso_data
+static __always_inline struct vdso_data *__arch_get_k_vdso_data(void)
+{
+       return NULL;
+}
+#endif /* __arch_get_k_vdso_data */
+
+#ifndef __arch_update_vdso_data
+static __always_inline int __arch_update_vdso_data(void)
+{
+       return 0;
+}
+#endif /* __arch_update_vdso_data */
+
+#ifndef __arch_get_clock_mode
+static __always_inline int __arch_get_clock_mode(struct timekeeper *tk)
+{
+       return 0;
+}
+#endif /* __arch_get_clock_mode */
+
+#ifndef __arch_use_vsyscall
+static __always_inline int __arch_use_vsyscall(struct vdso_data *vdata)
+{
+       return 1;
+}
+#endif /* __arch_use_vsyscall */
+
+#ifndef __arch_update_vsyscall
+static __always_inline void __arch_update_vsyscall(struct vdso_data *vdata,
+                                                  struct timekeeper *tk)
+{
+}
+#endif /* __arch_update_vsyscall */
+
+#ifndef __arch_sync_vdso_data
+static __always_inline void __arch_sync_vdso_data(struct vdso_data *vdata)
+{
+}
+#endif /* __arch_sync_vdso_data */
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_GENERIC_VSYSCALL_H */
diff --git a/include/vdso/vsyscall.h b/include/vdso/vsyscall.h
new file mode 100644
index 000000000000..2c6134e0c23d
--- /dev/null
+++ b/include/vdso/vsyscall.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_VSYSCALL_H
+#define __VDSO_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/vdso/vsyscall.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __VDSO_VSYSCALL_H */
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index f1e46f338a9c..1867044800bb 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -16,5 +16,6 @@ ifeq ($(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST),y)
 endif
 obj-$(CONFIG_GENERIC_SCHED_CLOCK)              += sched_clock.o
 obj-$(CONFIG_TICK_ONESHOT)                     += tick-oneshot.o tick-sched.o
+obj-$(CONFIG_HAVE_GENERIC_VDSO)                        += vsyscall.o
 obj-$(CONFIG_DEBUG_FS)                         += timekeeping_debug.o
 obj-$(CONFIG_TEST_UDELAY)                      += test_udelay.o
diff --git a/kernel/time/vsyscall.c b/kernel/time/vsyscall.c
new file mode 100644
index 000000000000..a80893180826
--- /dev/null
+++ b/kernel/time/vsyscall.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 ARM Ltd.
+ *
+ * Generic implementation of update_vsyscall and update_vsyscall_tz.
+ *
+ * Based on the x86 specific implementation.
+ */
+
+#include <linux/hrtimer.h>
+#include <linux/timekeeper_internal.h>
+#include <vdso/datapage.h>
+#include <vdso/helpers.h>
+#include <vdso/vsyscall.h>
+
+static inline void update_vdso_data(struct vdso_data *vdata,
+                                   struct timekeeper *tk)
+{
+       struct vdso_timestamp *vdso_ts;
+       u64 nsec;
+
+       vdata[CS_HRES_COARSE].cycle_last        = tk->tkr_mono.cycle_last;
+       vdata[CS_HRES_COARSE].mask              = tk->tkr_mono.mask;
+       vdata[CS_HRES_COARSE].mult              = tk->tkr_mono.mult;
+       vdata[CS_HRES_COARSE].shift             = tk->tkr_mono.shift;
+       vdata[CS_RAW].cycle_last                = tk->tkr_raw.cycle_last;
+       vdata[CS_RAW].mask                      = tk->tkr_raw.mask;
+       vdata[CS_RAW].mult                      = tk->tkr_raw.mult;
+       vdata[CS_RAW].shift                     = tk->tkr_raw.shift;
+
+       /* CLOCK_REALTIME */
+       vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME];
+       vdso_ts->sec    = tk->xtime_sec;
+       vdso_ts->nsec   = tk->tkr_mono.xtime_nsec;
+
+       /* CLOCK_MONOTONIC */
+       vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC];
+       vdso_ts->sec    = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
+
+       nsec = tk->tkr_mono.xtime_nsec;
+       nsec += ((u64)tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift);
+       while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
+               nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
+               vdso_ts->sec++;
+       }
+       vdso_ts->nsec   = nsec;
+
+       /* CLOCK_MONOTONIC_RAW */
+       vdso_ts         = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW];
+       vdso_ts->sec    = tk->raw_sec;
+       vdso_ts->nsec   = tk->tkr_raw.xtime_nsec;
+
+       /* CLOCK_BOOTTIME */
+       vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME];
+       vdso_ts->sec    = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
+       nsec = tk->tkr_mono.xtime_nsec;
+       nsec += ((u64)(tk->wall_to_monotonic.tv_nsec +
+                      ktime_to_ns(tk->offs_boot)) << tk->tkr_mono.shift);
+       while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
+               nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
+               vdso_ts->sec++;
+       }
+       vdso_ts->nsec   = nsec;
+
+       /* CLOCK_TAI */
+       vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI];
+       vdso_ts->sec    = tk->xtime_sec + (s64)tk->tai_offset;
+       vdso_ts->nsec   = tk->tkr_mono.xtime_nsec;
+
+       /*
+        * Read without the seqlock held by clock_getres().
+        * Note: No need to have a second copy.
+        */
+       WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution);
+}
+
+void update_vsyscall(struct timekeeper *tk)
+{
+       struct vdso_data *vdata = __arch_get_k_vdso_data();
+       struct vdso_timestamp *vdso_ts;
+       u64 nsec;
+
+       if (__arch_update_vdso_data()) {
+               /*
+                * Some architectures might want to skip the update of the
+                * data page.
+                */
+               return;
+       }
+
+       /* copy vsyscall data */
+       vdso_write_begin(vdata);
+
+       vdata[CS_HRES_COARSE].clock_mode        = __arch_get_clock_mode(tk);
+       vdata[CS_RAW].clock_mode                = __arch_get_clock_mode(tk);
+
+       /* CLOCK_REALTIME_COARSE */
+       vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME_COARSE];
+       vdso_ts->sec    = tk->xtime_sec;
+       vdso_ts->nsec   = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
+
+       /* CLOCK_MONOTONIC_COARSE */
+       vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC_COARSE];
+       vdso_ts->sec    = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
+       nsec            = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
+       nsec            = nsec + tk->wall_to_monotonic.tv_nsec;
+       while (nsec >= NSEC_PER_SEC) {
+               nsec = nsec - NSEC_PER_SEC;
+               vdso_ts->sec++;
+       }
+       vdso_ts->nsec   = nsec;
+
+       if (__arch_use_vsyscall(vdata))
+               update_vdso_data(vdata, tk);
+
+       __arch_update_vsyscall(vdata, tk);
+
+       vdso_write_end(vdata);
+
+       __arch_sync_vdso_data(vdata);
+}
+
+void update_vsyscall_tz(void)
+{
+       struct vdso_data *vdata = __arch_get_k_vdso_data();
+
+       if (__arch_use_vsyscall(vdata)) {
+               vdata[CS_HRES_COARSE].tz_minuteswest = sys_tz.tz_minuteswest;
+               vdata[CS_HRES_COARSE].tz_dsttime = sys_tz.tz_dsttime;
+       }
+
+       __arch_sync_vdso_data(vdata);
+}
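
For context on why the update is bracketed by vdso_write_begin() and
vdso_write_end(): they bump the sequence counter in the vdso_data pages
so that the userspace fast path can detect a concurrent update and
retry. A simplified, illustrative reader (not the in-tree lib/vdso
code; example_arch_get_hw_counter() is an assumed architecture hook)
would look roughly like this:

/* Illustrative only; everything except the vdso_data fields is assumed. */
static __always_inline u64 example_read_clock_monotonic_ns(const struct vdso_data *vdata)
{
        const struct vdso_data *vd = &vdata[CS_HRES_COARSE];
        const struct vdso_timestamp *vdso_ts = &vd->basetime[CLOCK_MONOTONIC];
        u64 cycles, sec, ns;
        u32 seq;

        do {
                /* An odd sequence count means an update is in progress. */
                while ((seq = READ_ONCE(vd->seq)) & 1)
                        cpu_relax();
                smp_rmb();

                cycles = example_arch_get_hw_counter();  /* assumed arch hook */
                ns  = vdso_ts->nsec;
                ns += ((cycles - vd->cycle_last) & vd->mask) * vd->mult;
                ns >>= vd->shift;
                sec = vdso_ts->sec;

                smp_rmb();
        } while (READ_ONCE(vd->seq) != seq);

        return sec * NSEC_PER_SEC + ns;
}

A real reader fills in a timespec and handles the coarse and raw bases
the same way; the point here is only the retry loop around the sequence
counter written by update_vsyscall().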
