[PATCH 06/22 -v7] handle accurate time keeping over long delays

2008-01-29 Thread Steven Rostedt
Handle accurate time even if there's a long delay between
accumulated clock cycles.

Signed-off-by: John Stultz <[EMAIL PROTECTED]>
Signed-off-by: Steven Rostedt <[EMAIL PROTECTED]>
---
 arch/powerpc/kernel/time.c|3 +-
 arch/x86/kernel/vsyscall_64.c |5 ++-
 include/asm-x86/vgtod.h   |2 -
 include/linux/clocksource.h   |   58 --
 kernel/time/timekeeping.c |   36 +-
 5 files changed, 82 insertions(+), 22 deletions(-)

Index: linux-mcount.git/arch/x86/kernel/vsyscall_64.c
===
--- linux-mcount.git.orig/arch/x86/kernel/vsyscall_64.c 2008-01-25 
21:47:06.0 -0500
+++ linux-mcount.git/arch/x86/kernel/vsyscall_64.c  2008-01-25 
21:47:09.0 -0500
@@ -86,6 +86,7 @@ void update_vsyscall(struct timespec *wa
vsyscall_gtod_data.clock.mask = clock->mask;
vsyscall_gtod_data.clock.mult = clock->mult;
vsyscall_gtod_data.clock.shift = clock->shift;
+   vsyscall_gtod_data.clock.cycle_accumulated = clock->cycle_accumulated;
vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic;
@@ -121,7 +122,7 @@ static __always_inline long time_syscall
 
 static __always_inline void do_vgettimeofday(struct timeval * tv)
 {
-   cycle_t now, base, mask, cycle_delta;
+   cycle_t now, base, accumulated, mask, cycle_delta;
unsigned seq;
unsigned long mult, shift, nsec;
cycle_t (*vread)(void);
@@ -135,6 +136,7 @@ static __always_inline void do_vgettimeo
}
now = vread();
base = __vsyscall_gtod_data.clock.cycle_last;
+   accumulated  = __vsyscall_gtod_data.clock.cycle_accumulated;
mask = __vsyscall_gtod_data.clock.mask;
mult = __vsyscall_gtod_data.clock.mult;
shift = __vsyscall_gtod_data.clock.shift;
@@ -145,6 +147,7 @@ static __always_inline void do_vgettimeo
 
/* calculate interval: */
cycle_delta = (now - base) & mask;
+   cycle_delta += accumulated;
/* convert to nsecs: */
nsec += (cycle_delta * mult) >> shift;
 
Index: linux-mcount.git/include/asm-x86/vgtod.h
===
--- linux-mcount.git.orig/include/asm-x86/vgtod.h   2008-01-25 
21:46:50.0 -0500
+++ linux-mcount.git/include/asm-x86/vgtod.h2008-01-25 21:47:09.0 
-0500
@@ -15,7 +15,7 @@ struct vsyscall_gtod_data {
struct timezone sys_tz;
struct { /* extract of a clocksource struct */
cycle_t (*vread)(void);
-   cycle_t cycle_last;
+   cycle_t cycle_last, cycle_accumulated;
cycle_t mask;
u32 mult;
u32 shift;
Index: linux-mcount.git/include/linux/clocksource.h
===
--- linux-mcount.git.orig/include/linux/clocksource.h   2008-01-25 
21:46:50.0 -0500
+++ linux-mcount.git/include/linux/clocksource.h2008-01-25 
21:47:09.0 -0500
@@ -50,8 +50,12 @@ struct clocksource;
  * @flags: flags describing special properties
  * @vread: vsyscall based read
  * @resume:resume function for the clocksource, if necessary
+ * @cycle_last:Used internally by timekeeping core, please 
ignore.
+ * @cycle_accumulated: Used internally by timekeeping core, please ignore.
  * @cycle_interval:Used internally by timekeeping core, please ignore.
  * @xtime_interval:Used internally by timekeeping core, please ignore.
+ * @xtime_nsec:Used internally by timekeeping core, please 
ignore.
+ * @error: Used internally by timekeeping core, please ignore.
  */
 struct clocksource {
/*
@@ -82,7 +86,10 @@ struct clocksource {
 * Keep it in a different cache line to dirty no
 * more than one cache line.
 */
-   cycle_t cycle_last cacheline_aligned_in_smp;
+   struct {
+   cycle_t cycle_last, cycle_accumulated;
+   } cacheline_aligned_in_smp;
+
u64 xtime_nsec;
s64 error;
 
@@ -168,11 +175,44 @@ static inline cycle_t clocksource_read(s
 }
 
 /**
+ * clocksource_get_cycles: - Access the clocksource's accumulated cycle value
+ * @cs:pointer to clocksource being read
+ * @now:   current cycle value
+ *
+ * Uses the clocksource to return the current cycle_t value.
+ * NOTE!!!: This is different from clocksource_read, because it
+ * returns the accumulated cycle value! Must hold xtime lock!
+ */
+static inline cycle_t
+clocksource_get_cycles(struct clocksource *cs, cycle_t now)
+{
+   cycle_t offset = (now - cs->cycle_last) & cs->mask;
+   offset += cs->cycle_accumulated;
+   

[PATCH 06/22 -v7] handle accurate time keeping over long delays

2008-01-29 Thread Steven Rostedt
Handle accurate time even if there's a long delay between
accumulated clock cycles.

Signed-off-by: John Stultz <[EMAIL PROTECTED]>
Signed-off-by: Steven Rostedt <[EMAIL PROTECTED]>
---
 arch/powerpc/kernel/time.c|3 +-
 arch/x86/kernel/vsyscall_64.c |5 ++-
 include/asm-x86/vgtod.h   |2 -
 include/linux/clocksource.h   |   58 --
 kernel/time/timekeeping.c |   36 +-
 5 files changed, 82 insertions(+), 22 deletions(-)

Index: linux-mcount.git/arch/x86/kernel/vsyscall_64.c
===
--- linux-mcount.git.orig/arch/x86/kernel/vsyscall_64.c 2008-01-25 
21:47:06.0 -0500
+++ linux-mcount.git/arch/x86/kernel/vsyscall_64.c  2008-01-25 
21:47:09.0 -0500
@@ -86,6 +86,7 @@ void update_vsyscall(struct timespec *wa
	vsyscall_gtod_data.clock.mask = clock->mask;
	vsyscall_gtod_data.clock.mult = clock->mult;
	vsyscall_gtod_data.clock.shift = clock->shift;
+	vsyscall_gtod_data.clock.cycle_accumulated = clock->cycle_accumulated;
	vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
	vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic;
@@ -121,7 +122,7 @@ static __always_inline long time_syscall
 
 static __always_inline void do_vgettimeofday(struct timeval * tv)
 {
-   cycle_t now, base, mask, cycle_delta;
+   cycle_t now, base, accumulated, mask, cycle_delta;
unsigned seq;
unsigned long mult, shift, nsec;
cycle_t (*vread)(void);
@@ -135,6 +136,7 @@ static __always_inline void do_vgettimeo
}
now = vread();
base = __vsyscall_gtod_data.clock.cycle_last;
+   accumulated  = __vsyscall_gtod_data.clock.cycle_accumulated;
mask = __vsyscall_gtod_data.clock.mask;
mult = __vsyscall_gtod_data.clock.mult;
shift = __vsyscall_gtod_data.clock.shift;
@@ -145,6 +147,7 @@ static __always_inline void do_vgettimeo
 
/* calculate interval: */
	cycle_delta = (now - base) & mask;
+   cycle_delta += accumulated;
/* convert to nsecs: */
	nsec += (cycle_delta * mult) >> shift;
 
Index: linux-mcount.git/include/asm-x86/vgtod.h
===
--- linux-mcount.git.orig/include/asm-x86/vgtod.h   2008-01-25 
21:46:50.0 -0500
+++ linux-mcount.git/include/asm-x86/vgtod.h2008-01-25 21:47:09.0 
-0500
@@ -15,7 +15,7 @@ struct vsyscall_gtod_data {
struct timezone sys_tz;
struct { /* extract of a clocksource struct */
cycle_t (*vread)(void);
-   cycle_t cycle_last;
+   cycle_t cycle_last, cycle_accumulated;
cycle_t mask;
u32 mult;
u32 shift;
Index: linux-mcount.git/include/linux/clocksource.h
===
--- linux-mcount.git.orig/include/linux/clocksource.h   2008-01-25 
21:46:50.0 -0500
+++ linux-mcount.git/include/linux/clocksource.h2008-01-25 
21:47:09.0 -0500
@@ -50,8 +50,12 @@ struct clocksource;
  * @flags: flags describing special properties
  * @vread: vsyscall based read
  * @resume:resume function for the clocksource, if necessary
+ * @cycle_last:Used internally by timekeeping core, please 
ignore.
+ * @cycle_accumulated: Used internally by timekeeping core, please ignore.
  * @cycle_interval:Used internally by timekeeping core, please ignore.
  * @xtime_interval:Used internally by timekeeping core, please ignore.
+ * @xtime_nsec:Used internally by timekeeping core, please 
ignore.
+ * @error: Used internally by timekeeping core, please ignore.
  */
 struct clocksource {
/*
@@ -82,7 +86,10 @@ struct clocksource {
 * Keep it in a different cache line to dirty no
 * more than one cache line.
 */
-   cycle_t cycle_last cacheline_aligned_in_smp;
+   struct {
+   cycle_t cycle_last, cycle_accumulated;
+   } cacheline_aligned_in_smp;
+
u64 xtime_nsec;
s64 error;
 
@@ -168,11 +175,44 @@ static inline cycle_t clocksource_read(s
 }
 
 /**
+ * clocksource_get_cycles: - Access the clocksource's accumulated cycle value
+ * @cs:pointer to clocksource being read
+ * @now:   current cycle value
+ *
+ * Uses the clocksource to return the current cycle_t value.
+ * NOTE!!!: This is different from clocksource_read, because it
+ * returns the accumulated cycle value! Must hold xtime lock!
+ */
+static inline cycle_t
+clocksource_get_cycles(struct clocksource *cs, cycle_t now)
+{
+	cycle_t offset = (now - cs->cycle_last) & cs->mask;
+	offset += cs->cycle_accumulated;
+   return