Hi Feng,

Using a global variable for this is not going to work: you are adding a
conditional branch and a memory load to a very hot path for the life of
the system, not just for the duration of boot.
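The hot path already has the right mechanism for this: __use_tsc is a
static key, so the existing test compiles down to a patched NOP/JMP with
no load at all. A minimal sketch of the idea (where exactly the key gets
enabled is an assumption on my side, not what your patch does):

	/* arch/x86/kernel/tsc.c already has this pattern: */
	static DEFINE_STATIC_KEY_FALSE(__use_tsc);

	u64 native_sched_clock(void)
	{
		if (static_branch_likely(&__use_tsc)) {
			/* patched to a NOP until the key is enabled */
			u64 tsc_now = rdtsc();

			/* return the value in ns */
			return cycles_2_ns(tsc_now);
		}

		/* pre-calibration fallback */
		return (jiffies_64 - INITIAL_JIFFIES) * (NSEC_PER_SEC / HZ);
	}

	/* once calibration has succeeded, e.g. early in tsc_init(): */
	static_branch_enable(&__use_tsc);

If you flip the key as soon as the TSC is calibrated, there is nothing
left for tsc_inited to guard.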

Pavel

>  
> +int tsc_inited;
>  /*
>   * TSC can be unstable due to cpufreq or due to unsynced TSCs
>   */
> @@ -192,7 +193,7 @@ static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_
>   */
>  u64 native_sched_clock(void)
>  {
> -     if (static_branch_likely(&__use_tsc)) {
> +     if (static_branch_likely(&__use_tsc) || tsc_inited) {
>               u64 tsc_now = rdtsc();
>  
>               /* return the value in ns */
> @@ -1387,30 +1391,16 @@ static int __init init_tsc_clocksource(void)
>   */
>  device_initcall(init_tsc_clocksource);
>  
> -void __init tsc_early_delay_calibrate(void)
> -{
> -     unsigned long lpj;
> -
> -     if (!boot_cpu_has(X86_FEATURE_TSC))
> -             return;
> -
> -     cpu_khz = x86_platform.calibrate_cpu();
> -     tsc_khz = x86_platform.calibrate_tsc();
> -
> -     tsc_khz = tsc_khz ? : cpu_khz;
> -     if (!tsc_khz)
> -             return;
> -
> -     lpj = tsc_khz * 1000;
> -     do_div(lpj, HZ);
> -     loops_per_jiffy = lpj;
> -}
> -
>  void __init tsc_init(void)
>  {
>       u64 lpj, cyc;
>       int cpu;
>  
> +     if (tsc_inited)
> +             return;
> +
> +     tsc_inited = 1;
> +
>       if (!boot_cpu_has(X86_FEATURE_TSC)) {
>               setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
>               return;
> @@ -1474,11 +1464,15 @@ void __init tsc_init(void)
>       lpj = ((u64)tsc_khz * 1000);
>       do_div(lpj, HZ);
>       lpj_fine = lpj;
> +     loops_per_jiffy = lpj;
>  
>       use_tsc_delay();
>  
>       check_system_tsc_reliable();
>  
> +     extern void early_set_sched_clock_stable(u64 sched_clock_offset);
> +     early_set_sched_clock_stable(div64_u64(rdtsc() * 1000, tsc_khz));
> +
>       if (unsynchronized_tsc()) {
>               mark_tsc_unstable("TSCs unsynchronized");
>               return;
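By the way, an extern declaration inside a function body is against
kernel style; the prototype belongs in a shared header that both sides
include, something like (the exact header is my guess):

	/* include/linux/sched/clock.h -- placement assumed */
	extern void early_set_sched_clock_stable(u64 sched_clock_offset);

and then tsc.c simply includes that header and calls the function.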
> diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
> index 10c83e7..6c5c22d 100644
> --- a/kernel/sched/clock.c
> +++ b/kernel/sched/clock.c
> @@ -119,6 +119,13 @@ static void __scd_stamp(struct sched_clock_data *scd)
>       scd->tick_raw = sched_clock();
>  }
>  
> +
> +void early_set_sched_clock_stable(u64 sched_clock_offset)
> +{
> +     __sched_clock_offset = sched_clock_offset;
> +     static_branch_enable(&__sched_clock_stable);
> +}
> +
>  static void __set_sched_clock_stable(void)
>  {
>       struct sched_clock_data *scd;
> @@ -342,12 +349,14 @@ static u64 sched_clock_remote(struct sched_clock_data *scd)
>   *
>   * See cpu_clock().
>   */
> +
> +extern int tsc_inited;
>  u64 sched_clock_cpu(int cpu)
>  {
>       struct sched_clock_data *scd;
>       u64 clock;
>  
> -     if (sched_clock_stable())
> +     if (sched_clock_stable() || tsc_inited)
>               return sched_clock() + __sched_clock_offset;
>  
>       if (unlikely(!sched_clock_running))
>
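Same story in sched_clock_cpu(): sched_clock_stable() is already a
static branch, and your early_set_sched_clock_stable() enables exactly
that key:

	/* kernel/sched/clock.c -- the existing helper: */
	int sched_clock_stable(void)
	{
		return static_branch_likely(&__sched_clock_stable);
	}

Once the key is flipped, the additional tsc_inited test buys you
nothing on the hot path. If there is a window before the key can be
enabled, the fix is to enable it earlier, which is also what the
comment quoted below is telling you.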
>>>
>>> If you have a dodgy part (sorry SKX), you'll just have to live with
>>> sched_clock starting late(r).
>>>
>>> Do not cobble things on the side, try and get the normal things running
>>> earlier.
