[PATCH 11/12] clocksource: remove update_callback
Uses the block notifier to replace the functionality of update_callback(). update_callback() was a special case specifically for the tsc, but including it in the clocksource structure duplicated it needlessly for other clocks. Signed-Off-By: Daniel Walker <[EMAIL PROTECTED]> --- arch/i386/kernel/tsc.c | 35 ++- arch/x86_64/kernel/tsc.c| 19 +-- include/linux/clocksource.h |2 -- include/linux/timekeeping.h |1 + kernel/time/timekeeping.c | 32 ++-- 5 files changed, 46 insertions(+), 43 deletions(-) Index: linux-2.6.19/arch/i386/kernel/tsc.c === --- linux-2.6.19.orig/arch/i386/kernel/tsc.c +++ linux-2.6.19/arch/i386/kernel/tsc.c @@ -51,8 +51,7 @@ static int __init tsc_setup(char *str) __setup("notsc", tsc_setup); /* - * code to mark and check if the TSC is unstable - * due to cpufreq or due to unsynced TSCs + * Flag that denotes an unstable tsc and check function. */ static int tsc_unstable; @@ -61,12 +60,6 @@ static inline int check_tsc_unstable(voi return tsc_unstable; } -void mark_tsc_unstable(void) -{ - tsc_unstable = 1; -} -EXPORT_SYMBOL_GPL(mark_tsc_unstable); - /* Accellerators for sched_clock() * convert from cycles(64bits) => nanoseconds (64bits) * basic equation: @@ -180,6 +173,7 @@ int recalibrate_cpu_khz(void) if (cpu_has_tsc) { cpu_khz = calculate_cpu_khz(); tsc_khz = cpu_khz; + mark_tsc_unstable(); cpu_data[0].loops_per_jiffy = cpufreq_scale(cpu_data[0].loops_per_jiffy, cpu_khz_old, cpu_khz); @@ -332,7 +326,6 @@ core_initcall(cpufreq_tsc); /* clock source code */ static unsigned long current_tsc_khz = 0; -static int tsc_update_callback(void); static cycle_t read_tsc(void) { @@ -350,32 +343,24 @@ static struct clocksource clocksource_ts .mask = CLOCKSOURCE_MASK(64), .mult = 0, /* to be set */ .shift = 22, - .update_callback= tsc_update_callback, .is_continuous = 1, .list = LIST_HEAD_INIT(clocksource_tsc.list), }; -static int tsc_update_callback(void) +/* + * code to mark if the TSC is unstable + * due to cpufreq or due to unsynced TSCs + */ +void 
mark_tsc_unstable(void) { - int change = 0; - /* check to see if we should switch to the safe clocksource: */ - if (clocksource_tsc.rating != 0 && check_tsc_unstable()) { + if (unlikely(!tsc_unstable && clocksource_tsc.rating != 0)) { clocksource_tsc.rating = 0; clocksource_rating_change(&clocksource_tsc); - change = 1; - } - - /* only update if tsc_khz has changed: */ - if (current_tsc_khz != tsc_khz) { - current_tsc_khz = tsc_khz; - clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz, - clocksource_tsc.shift); - change = 1; } - - return change; + tsc_unstable = 1; } +EXPORT_SYMBOL_GPL(mark_tsc_unstable); static int __init dmi_mark_tsc_unstable(struct dmi_system_id *d) { Index: linux-2.6.19/arch/x86_64/kernel/tsc.c === --- linux-2.6.19.orig/arch/x86_64/kernel/tsc.c +++ linux-2.6.19/arch/x86_64/kernel/tsc.c @@ -47,11 +47,6 @@ static inline int check_tsc_unstable(voi return tsc_unstable; } -void mark_tsc_unstable(void) -{ - tsc_unstable = 1; -} -EXPORT_SYMBOL_GPL(mark_tsc_unstable); #ifdef CONFIG_CPU_FREQ @@ -185,8 +180,6 @@ __setup("notsc", notsc_setup); /* clock source code: */ -static int tsc_update_callback(void); - static cycle_t read_tsc(void) { cycle_t ret = (cycle_t)get_cycles_sync(); @@ -206,24 +199,22 @@ static struct clocksource clocksource_ts .mask = (cycle_t)-1, .mult = 0, /* to be set */ .shift = 22, - .update_callback= tsc_update_callback, .is_continuous = 1, .vread = vread_tsc, .list = LIST_HEAD_INIT(clocksource_tsc.list), }; -static int tsc_update_callback(void) +void mark_tsc_unstable(void) { - int change = 0; - /* check to see if we should switch to the safe clocksource: */ - if (clocksource_tsc.rating != 50 && check_tsc_unstable()) { + if (unlikely(!tsc_unstable && clocksource_tsc.rating != 50)) { clocksource_tsc.rating = 50; clocksource_rating_change(&clocksource_tsc); - change = 1; } - return change; + + tsc_unstable = 1; } +EXPORT_SYMBOL_GPL(mark_tsc_unstable); static int __init init_tsc_clocksource(void) {
[PATCH 11/12] clocksource: remove update_callback
Uses the block notifier to replace the functionality of update_callback(). update_callback() was a special case specifically for the tsc, but including it in the clocksource structure duplicated it needlessly for other clocks. Signed-Off-By: Daniel Walker <[EMAIL PROTECTED]> --- arch/i386/kernel/tsc.c | 35 ++- arch/x86_64/kernel/tsc.c| 19 +-- include/linux/clocksource.h |2 -- include/linux/timekeeping.h |1 + kernel/time/timekeeping.c | 32 ++-- 5 files changed, 46 insertions(+), 43 deletions(-) Index: linux-2.6.19/arch/i386/kernel/tsc.c === --- linux-2.6.19.orig/arch/i386/kernel/tsc.c +++ linux-2.6.19/arch/i386/kernel/tsc.c @@ -51,8 +51,7 @@ static int __init tsc_setup(char *str) __setup("notsc", tsc_setup); /* - * code to mark and check if the TSC is unstable - * due to cpufreq or due to unsynced TSCs + * Flag that denotes an unstable tsc and check function. */ static int tsc_unstable; @@ -61,12 +60,6 @@ static inline int check_tsc_unstable(voi return tsc_unstable; } -void mark_tsc_unstable(void) -{ - tsc_unstable = 1; -} -EXPORT_SYMBOL_GPL(mark_tsc_unstable); - /* Accellerators for sched_clock() * convert from cycles(64bits) => nanoseconds (64bits) * basic equation: @@ -180,6 +173,7 @@ int recalibrate_cpu_khz(void) if (cpu_has_tsc) { cpu_khz = calculate_cpu_khz(); tsc_khz = cpu_khz; + mark_tsc_unstable(); cpu_data[0].loops_per_jiffy = cpufreq_scale(cpu_data[0].loops_per_jiffy, cpu_khz_old, cpu_khz); @@ -332,7 +326,6 @@ core_initcall(cpufreq_tsc); /* clock source code */ static unsigned long current_tsc_khz = 0; -static int tsc_update_callback(void); static cycle_t read_tsc(void) { @@ -350,32 +343,24 @@ static struct clocksource clocksource_ts .mask = CLOCKSOURCE_MASK(64), .mult = 0, /* to be set */ .shift = 22, - .update_callback= tsc_update_callback, .is_continuous = 1, .list = LIST_HEAD_INIT(clocksource_tsc.list), }; -static int tsc_update_callback(void) +/* + * code to mark if the TSC is unstable + * due to cpufreq or due to unsynced TSCs + */ +void 
mark_tsc_unstable(void) { - int change = 0; - /* check to see if we should switch to the safe clocksource: */ - if (clocksource_tsc.rating != 0 && check_tsc_unstable()) { + if (unlikely(!tsc_unstable && clocksource_tsc.rating != 0)) { clocksource_tsc.rating = 0; clocksource_rating_change(&clocksource_tsc); - change = 1; - } - - /* only update if tsc_khz has changed: */ - if (current_tsc_khz != tsc_khz) { - current_tsc_khz = tsc_khz; - clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz, - clocksource_tsc.shift); - change = 1; } - - return change; + tsc_unstable = 1; } +EXPORT_SYMBOL_GPL(mark_tsc_unstable); static int __init dmi_mark_tsc_unstable(struct dmi_system_id *d) { Index: linux-2.6.19/arch/x86_64/kernel/tsc.c === --- linux-2.6.19.orig/arch/x86_64/kernel/tsc.c +++ linux-2.6.19/arch/x86_64/kernel/tsc.c @@ -47,11 +47,6 @@ static inline int check_tsc_unstable(voi return tsc_unstable; } -void mark_tsc_unstable(void) -{ - tsc_unstable = 1; -} -EXPORT_SYMBOL_GPL(mark_tsc_unstable); #ifdef CONFIG_CPU_FREQ @@ -185,8 +180,6 @@ __setup("notsc", notsc_setup); /* clock source code: */ -static int tsc_update_callback(void); - static cycle_t read_tsc(void) { cycle_t ret = (cycle_t)get_cycles_sync(); @@ -206,24 +199,22 @@ static struct clocksource clocksource_ts .mask = (cycle_t)-1, .mult = 0, /* to be set */ .shift = 22, - .update_callback= tsc_update_callback, .is_continuous = 1, .vread = vread_tsc, .list = LIST_HEAD_INIT(clocksource_tsc.list), }; -static int tsc_update_callback(void) +void mark_tsc_unstable(void) { - int change = 0; - /* check to see if we should switch to the safe clocksource: */ - if (clocksource_tsc.rating != 50 && check_tsc_unstable()) { + if (unlikely(!tsc_unstable && clocksource_tsc.rating != 50)) { clocksource_tsc.rating = 50; clocksource_rating_change(&clocksource_tsc); - change = 1; } - return change; + + tsc_unstable = 1; } +EXPORT_SYMBOL_GPL(mark_tsc_unstable); static int __init init_tsc_clocksource(void)