[PATCH v3] x86/vdso: Handle clock_gettime(CLOCK_TAI) in vDSO

2018-08-31, Matt Rickard
Process clock_gettime(CLOCK_TAI) in vDSO.
This makes the call about as fast as CLOCK_REALTIME and CLOCK_MONOTONIC:

             nanoseconds
     before  after  clockname
     ------  -----  ---------------
        233     87  CLOCK_TAI
         96     93  CLOCK_REALTIME
         88     87  CLOCK_MONOTONIC
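
A minimal micro-benchmark in this spirit (a sketch, not the exact harness
used for the numbers above; the iteration count, clock ids measured, and
output format are assumptions) might look like:

    /* Sketch: average cost of one clock_gettime() call per clock id.
     * CLOCK_TAI is Linux-specific; timed against CLOCK_MONOTONIC. */
    #include <stdio.h>
    #include <time.h>

    #define ITERS 1000000L

    static void bench(clockid_t clk, const char *name)
    {
            struct timespec start, end, ts;
            long long ns;

            clock_gettime(CLOCK_MONOTONIC, &start);
            for (long i = 0; i < ITERS; i++)
                    clock_gettime(clk, &ts);
            clock_gettime(CLOCK_MONOTONIC, &end);

            ns = (end.tv_sec - start.tv_sec) * 1000000000LL
                    + (end.tv_nsec - start.tv_nsec);
            printf("%-16s %lld ns/call\n", name, ns / ITERS);
    }

    int main(void)
    {
            bench(CLOCK_TAI, "CLOCK_TAI");
            bench(CLOCK_REALTIME, "CLOCK_REALTIME");
            bench(CLOCK_MONOTONIC, "CLOCK_MONOTONIC");
            return 0;
    }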

Signed-off-by: Matt Rickard 
---
 arch/x86/entry/vdso/vclock_gettime.c    | 25 +++++++++++++++++++++++++
 arch/x86/entry/vsyscall/vsyscall_gtod.c |  2 ++
 arch/x86/include/asm/vgtod.h            |  1 +
 3 files changed, 28 insertions(+)

diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index f19856d95c60..91ed1bb2a3bb 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -246,6 +246,27 @@ notrace static int __always_inline do_monotonic(struct timespec *ts)
         return mode;
 }
 
+notrace static int __always_inline do_tai(struct timespec *ts)
+{
+        unsigned long seq;
+        u64 ns;
+        int mode;
+
+        do {
+                seq = gtod_read_begin(gtod);
+                mode = gtod->vclock_mode;
+                ts->tv_sec = gtod->tai_time_sec;
+                ns = gtod->wall_time_snsec;
+                ns += vgetsns(&mode);
+                ns >>= gtod->shift;
+        } while (unlikely(gtod_read_retry(gtod, seq)));
+
+        ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+        ts->tv_nsec = ns;
+
+        return mode;
+}
+
 notrace static void do_realtime_coarse(struct timespec *ts)
 {
         unsigned long seq;
@@ -277,6 +298,10 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
                 if (do_monotonic(ts) == VCLOCK_NONE)
                         goto fallback;
                 break;
+        case CLOCK_TAI:
+                if (do_tai(ts) == VCLOCK_NONE)
+                        goto fallback;
+                break;
         case CLOCK_REALTIME_COARSE:
                 do_realtime_coarse(ts);
                 break;
diff --git a/arch/x86/entry/vsyscall/vsyscall_gtod.c b/arch/x86/entry/vsyscall/vsyscall_gtod.c
index e1216dd95c04..d61392fe17f6 100644
--- a/arch/x86/entry/vsyscall/vsyscall_gtod.c
+++ b/arch/x86/entry/vsyscall/vsyscall_gtod.c
@@ -53,6 +53,8 @@ void update_vsyscall(struct timekeeper *tk)
         vdata->monotonic_time_snsec    = tk->tkr_mono.xtime_nsec
                                        + ((u64)tk->wall_to_monotonic.tv_nsec
                                                << tk->tkr_mono.shift);
+        vdata->tai_time_sec            = tk->xtime_sec
+                                       + tk->tai_offset;
         while (vdata->monotonic_time_snsec >=
                        (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
                 vdata->monotonic_time_snsec -=
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index fb856c9f0449..adc9f7b20b9c 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -32,6 +32,7 @@ struct vsyscall_gtod_data {
         gtod_long_t     wall_time_coarse_nsec;
         gtod_long_t     monotonic_time_coarse_sec;
         gtod_long_t     monotonic_time_coarse_nsec;
+        gtod_long_t     tai_time_sec;
 
         int             tz_minuteswest;
         int             tz_dsttime;
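
Usage note: CLOCK_TAI differs from CLOCK_REALTIME by exactly the TAI offset
that the vsyscall_gtod.c hunk copies into tai_time_sec. A quick userspace
sanity check (a sketch, assuming a kernel with this patch and a time daemon
that has programmed the kernel's TAI offset) is:

    /* Sketch: print the observed TAI-UTC offset. Expect 37 s as of 2018
     * on a host with a current leap-second table, 0 if it was never set. */
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            struct timespec tai, real;

            clock_gettime(CLOCK_TAI, &tai);
            clock_gettime(CLOCK_REALTIME, &real);
            printf("TAI - UTC = %ld s\n", (long)(tai.tv_sec - real.tv_sec));
            return 0;
    }

On hosts where nothing has set the offset, both clocks read the same and the
printed difference is 0.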

