On Fri, Nov 08, 2019 at 09:34:27PM +0100, Arnd Bergmann wrote:
> 'struct timespec' is getting removed from the kernel. The usage in ipmi
> was fixed before in commit 48862ea2ce86 ("ipmi: Update timespec usage
> to timespec64"), but unfortunately it crept back in.
> 
> The busy looping code can better use ktime_t anyway, so use that
> there to simplify the implementation.

Thanks, this is a big improvement.  I have this queued, but if you
are going to submit this yourself, I can remove it from my queue, and:

Reviewed-by: Corey Minyard <cminy...@mvista.com>

Do you think this should go into 5.4?
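
FWIW, for anyone skimming the thread, the nice part is that the
busy-wait bookkeeping collapses to plain s64 arithmetic on a ktime_t
instead of juggling normalized sec/nsec pairs.  A rough sketch of the
resulting pattern (simplified, made-up names, and the
SI_SM_CALL_WITH_DELAY check omitted; see the real patch below):

	#include <linux/ktime.h>

	/* -1 ns is never a valid deadline, so it works as a sentinel. */
	#define NOT_BUSY	ns_to_ktime(-1ull)

	static bool keep_spinning(unsigned int max_busy_us,
				  ktime_t *busy_until)
	{
		if (max_busy_us == 0) {
			/* Busy-waiting disabled for this interface. */
			*busy_until = NOT_BUSY;
		} else if (*busy_until == NOT_BUSY) {
			/* Start a new busy-wait window from now. */
			*busy_until = ktime_get() + max_busy_us * NSEC_PER_USEC;
		} else if (ktime_get() > *busy_until) {
			/* Window expired; stop spinning. */
			*busy_until = NOT_BUSY;
			return false;
		}
		return true;
	}

The debug_timestamp() hunk is the usual time64_t fixup: tv_sec is now
s64, so it is printed with %lld instead of being cast to long.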

-corey

> 
> Fixes: cbb19cb1eef0 ("ipmi_si: Convert timespec64 to timespec")
> Signed-off-by: Arnd Bergmann <a...@arndb.de>
> ---
>  drivers/char/ipmi/ipmi_si_intf.c | 40 +++++++++++---------------------
>  1 file changed, 13 insertions(+), 27 deletions(-)
> 
> diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
> index 6b9a0593d2eb..c7cc8538b84a 100644
> --- a/drivers/char/ipmi/ipmi_si_intf.c
> +++ b/drivers/char/ipmi/ipmi_si_intf.c
> @@ -265,10 +265,10 @@ static void cleanup_ipmi_si(void);
>  #ifdef DEBUG_TIMING
>  void debug_timestamp(char *msg)
>  {
> -     struct timespec t;
> +     struct timespec64 t;
>  
> -     ktime_get_ts(&t);
> -     pr_debug("**%s: %ld.%9.9ld\n", msg, (long) t.tv_sec, t.tv_nsec);
> +     ktime_get_ts64(&t);
> +     pr_debug("**%s: %lld.%9.9ld\n", msg, t.tv_sec, t.tv_nsec);
>  }
>  #else
>  #define debug_timestamp(x)
> @@ -935,38 +935,25 @@ static void set_run_to_completion(void *send_info, bool i_run_to_completion)
>  }
>  
>  /*
> - * Use -1 in the nsec value of the busy waiting timespec to tell that
> - * we are spinning in kipmid looking for something and not delaying
> - * between checks
> + * Use -1 as a special constant to tell that we are spinning in kipmid
> + * looking for something and not delaying between checks
>   */
> -static inline void ipmi_si_set_not_busy(struct timespec *ts)
> -{
> -     ts->tv_nsec = -1;
> -}
> -static inline int ipmi_si_is_busy(struct timespec *ts)
> -{
> -     return ts->tv_nsec != -1;
> -}
> -
> +#define IPMI_TIME_NOT_BUSY ns_to_ktime(-1ull)
>  static inline bool ipmi_thread_busy_wait(enum si_sm_result smi_result,
>                                        const struct smi_info *smi_info,
> -                                      struct timespec *busy_until)
> +                                      ktime_t *busy_until)
>  {
>       unsigned int max_busy_us = 0;
>  
>       if (smi_info->si_num < num_max_busy_us)
>               max_busy_us = kipmid_max_busy_us[smi_info->si_num];
>       if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
> -             ipmi_si_set_not_busy(busy_until);
> -     else if (!ipmi_si_is_busy(busy_until)) {
> -             ktime_get_ts(busy_until);
> -             timespec_add_ns(busy_until, max_busy_us * NSEC_PER_USEC);
> +             *busy_until = IPMI_TIME_NOT_BUSY;
> +     else if (*busy_until == IPMI_TIME_NOT_BUSY) {
> +             *busy_until = ktime_get() + max_busy_us * NSEC_PER_USEC;
>       } else {
> -             struct timespec now;
> -
> -             ktime_get_ts(&now);
> -             if (unlikely(timespec_compare(&now, busy_until) > 0)) {
> -                     ipmi_si_set_not_busy(busy_until);
> +             if (unlikely(ktime_get() > *busy_until)) {
> +                     *busy_until = IPMI_TIME_NOT_BUSY;
>                       return false;
>               }
>       }
> @@ -988,9 +975,8 @@ static int ipmi_thread(void *data)
>       struct smi_info *smi_info = data;
>       unsigned long flags;
>       enum si_sm_result smi_result;
> -     struct timespec busy_until = { 0, 0 };
> +     ktime_t busy_until = IPMI_TIME_NOT_BUSY;
>  
> -     ipmi_si_set_not_busy(&busy_until);
>       set_user_nice(current, MAX_NICE);
>       while (!kthread_should_stop()) {
>               int busy_wait;
> -- 
> 2.20.0
> 

