As the last users of CONFIG_GENERIC_TIME_VSYSCALL_OLD have been converted
to the new update_vsyscall() interface, support for the old interface can
be removed from the timekeeping code.

Cc: John Stultz <john.stu...@linaro.org>
Cc: Prarit Bhargava <pra...@redhat.com>
Cc: Richard Cochran <richardcoch...@gmail.com>
Signed-off-by: Miroslav Lichvar <mlich...@redhat.com>
---
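For background: the new-style update_vsyscall() lets architectures read the
timekeeper's shifted-nanosecond state (xtime_nsec, i.e. ns << shift) directly,
while the old interface handed them an already-truncated timespec plus
mult/cycle_last. A minimal user-space sketch of the difference follows; this
is not kernel code, the names only mirror the tk->tkr_mono fields and all
values are made up:

/*
 * Illustrative user-space sketch, not kernel code: the old vsyscall
 * interface stored a plain timespec (whole nanoseconds), while the
 * new-style update_vsyscall() exposes xtime_nsec as shifted
 * nanoseconds (ns << shift).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t shift = 8;                     /* stands in for tkr_mono.shift */
	uint32_t mult = 1000;                   /* stands in for tkr_mono.mult */
	uint64_t cycle_last = 1000000;          /* counter value at last update */
	uint64_t cycles = 1000123;              /* counter value "now" */
	uint64_t xtime_nsec = (500ULL << shift) + 200; /* 500 ns plus a sub-ns part */

	uint64_t delta = cycles - cycle_last;

	/* New style: the sub-nanosecond remainder is carried to the end. */
	uint64_t ns_new = (xtime_nsec + delta * mult) >> shift;

	/* Old style: the base is already truncated to whole nanoseconds. */
	uint64_t ns_old = (xtime_nsec >> shift) + ((delta * mult) >> shift);

	printf("new: %llu ns, old: %llu ns\n",
	       (unsigned long long)ns_new, (unsigned long long)ns_old);
	return 0;
}

Computing from a whole-nanosecond base truncates the sub-nanosecond remainder
twice, which is the source of the 1ns inconsistencies mentioned in the comment
removed below.
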
 include/linux/timekeeper_internal.h |  7 ------
 kernel/time/Kconfig                 |  4 ----
 kernel/time/timekeeping.c           | 44 -------------------------------------
 3 files changed, 55 deletions(-)
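
For reference, the rounding that the removed old_vsyscall_fixup() performed
(see the timekeeping.c hunk below) can be reproduced in a few lines of
user-space C; only the arithmetic mirrors the kernel code and the values are
made up:

/*
 * Illustrative user-space sketch, not kernel code: the rounding done by
 * the removed old_vsyscall_fixup().
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t shift = 8;                     /* stands in for tkr_mono.shift */
	uint32_t ntp_error_shift = 4;           /* stands in for tk->ntp_error_shift */
	uint64_t xtime_nsec = (123456789ULL << shift) + 57; /* ns << shift, plus a remainder */
	int64_t ntp_error = 0;

	/* Keep only full nanoseconds in xtime_nsec, rounding it up... */
	uint64_t remainder = xtime_nsec & ((1ULL << shift) - 1);
	if (remainder != 0) {
		xtime_nsec -= remainder;
		xtime_nsec += 1ULL << shift;
		/* ...and charge the difference to the NTP error accumulator. */
		ntp_error += remainder << ntp_error_shift;
		ntp_error -= (int64_t)((1ULL << shift) << ntp_error_shift);
	}

	printf("xtime_nsec: %llu full ns, ntp_error: %lld\n",
	       (unsigned long long)(xtime_nsec >> shift), (long long)ntp_error);
	return 0;
}

Rounding xtime_nsec up to a full nanosecond and charging the difference to
ntp_error is the extra work in timekeeping_adjust() that the removed comment
refers to; with all users of the old interface gone, none of this is needed.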

diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 110f453..b7ae5b0 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -132,13 +132,6 @@ struct timekeeper {
 extern void update_vsyscall(struct timekeeper *tk);
 extern void update_vsyscall_tz(void);
 
-#elif defined(CONFIG_GENERIC_TIME_VSYSCALL_OLD)
-
-extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm,
-                               struct clocksource *c, u32 mult,
-                               u64 cycle_last);
-extern void update_vsyscall_tz(void);
-
 #else
 
 static inline void update_vsyscall(struct timekeeper *tk)
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 4008d9f..55d61a3 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -21,10 +21,6 @@ config CLOCKSOURCE_VALIDATE_LAST_CYCLE
 config GENERIC_TIME_VSYSCALL
        bool
 
-# Timekeeping vsyscall support
-config GENERIC_TIME_VSYSCALL_OLD
-       bool
-
 # Old style timekeeping
 config ARCH_USES_GETTIMEOFFSET
        bool
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 8fd77c6..ff542dd 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -487,44 +487,6 @@ static void halt_fast_timekeeper(struct timekeeper *tk)
        update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
 }
 
-#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
-
-static inline void update_vsyscall(struct timekeeper *tk)
-{
-       struct timespec xt, wm;
-
-       xt = timespec64_to_timespec(tk_xtime(tk));
-       wm = timespec64_to_timespec(tk->wall_to_monotonic);
-       update_vsyscall_old(&xt, &wm, tk->tkr_mono.clock, tk->tkr_mono.mult,
-                           tk->tkr_mono.cycle_last);
-}
-
-static inline void old_vsyscall_fixup(struct timekeeper *tk)
-{
-       s64 remainder;
-
-       /*
-       * Store only full nanoseconds into xtime_nsec after rounding
-       * it up and add the remainder to the error difference.
-       * XXX - This is necessary to avoid small 1ns inconsistnecies caused
-       * by truncating the remainder in vsyscalls. However, it causes
-       * additional work to be done in timekeeping_adjust(). Once
-       * the vsyscall implementations are converted to use xtime_nsec
-       * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
-       * users are removed, this can be killed.
-       */
-       remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1);
-       if (remainder != 0) {
-               tk->tkr_mono.xtime_nsec -= remainder;
-               tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
-               tk->ntp_error += remainder << tk->ntp_error_shift;
-               tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
-       }
-}
-#else
-#define old_vsyscall_fixup(tk)
-#endif
-
 static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
 
 static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
@@ -2065,12 +2027,6 @@ void update_wall_time(void)
        timekeeping_adjust(tk, offset);
 
        /*
-        * XXX This can be killed once everyone converts
-        * to the new update_vsyscall.
-        */
-       old_vsyscall_fixup(tk);
-
-       /*
         * Finally, make sure that after the rounding
         * xtime_nsec isn't larger than NSEC_PER_SEC
         */
-- 
2.9.3
