On Sun, Sep 10, 2023 at 04:29:05AM -0400, guo...@kernel.org wrote:
> From: Guo Ren <guo...@linux.alibaba.com>
> 
> Using static_call to switch between:
>   native_queued_spin_lock_slowpath()    __pv_queued_spin_lock_slowpath()
>   native_queued_spin_unlock()           __pv_queued_spin_unlock()
> 
> Finish the pv_wait implementation, but pv_kick needs the SBI
> definition of the next patches.
> 
> Signed-off-by: Guo Ren <guo...@linux.alibaba.com>
> Signed-off-by: Guo Ren <guo...@kernel.org>
> ---
>  arch/riscv/include/asm/Kbuild               |  1 -
>  arch/riscv/include/asm/qspinlock.h          | 35 +++++++++++++
>  arch/riscv/include/asm/qspinlock_paravirt.h | 29 +++++++++++
>  arch/riscv/include/asm/spinlock.h           |  2 +-
>  arch/riscv/kernel/qspinlock_paravirt.c      | 57 +++++++++++++++++++++
>  arch/riscv/kernel/setup.c                   |  4 ++
>  6 files changed, 126 insertions(+), 2 deletions(-)
>  create mode 100644 arch/riscv/include/asm/qspinlock.h
>  create mode 100644 arch/riscv/include/asm/qspinlock_paravirt.h
>  create mode 100644 arch/riscv/kernel/qspinlock_paravirt.c
> 
> diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
> index a0dc85e4a754..b89cb3b73c13 100644
> --- a/arch/riscv/include/asm/Kbuild
> +++ b/arch/riscv/include/asm/Kbuild
> @@ -7,6 +7,5 @@ generic-y += parport.h
>  generic-y += spinlock_types.h
>  generic-y += qrwlock.h
>  generic-y += qrwlock_types.h
> -generic-y += qspinlock.h
>  generic-y += user.h
>  generic-y += vmlinux.lds.h
> diff --git a/arch/riscv/include/asm/qspinlock.h b/arch/riscv/include/asm/qspinlock.h
> new file mode 100644
> index 000000000000..7d4f416c908c
> --- /dev/null
> +++ b/arch/riscv/include/asm/qspinlock.h
> @@ -0,0 +1,35 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * Copyright (c), 2023 Alibaba Cloud
> + * Authors:
> + *   Guo Ren <guo...@linux.alibaba.com>
> + */
> +
> +#ifndef _ASM_RISCV_QSPINLOCK_H
> +#define _ASM_RISCV_QSPINLOCK_H
> +
> +#ifdef CONFIG_PARAVIRT_SPINLOCKS
> +#include <asm/qspinlock_paravirt.h>
> +
> +/* How long a lock should spin before we consider blocking */
> +#define SPIN_THRESHOLD               (1 << 15)
> +
> +void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> +void __pv_init_lock_hash(void);
> +void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> +
> +static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
> +{
> +     static_call(pv_queued_spin_lock_slowpath)(lock, val);
> +}
> +
> +#define queued_spin_unlock   queued_spin_unlock
> +static inline void queued_spin_unlock(struct qspinlock *lock)
> +{
> +     static_call(pv_queued_spin_unlock)(lock);
> +}
> +#endif /* CONFIG_PARAVIRT_SPINLOCKS */
> +
> +#include <asm-generic/qspinlock.h>
> +
> +#endif /* _ASM_RISCV_QSPINLOCK_H */
> diff --git a/arch/riscv/include/asm/qspinlock_paravirt.h b/arch/riscv/include/asm/qspinlock_paravirt.h
> new file mode 100644
> index 000000000000..9681e851f69d
> --- /dev/null
> +++ b/arch/riscv/include/asm/qspinlock_paravirt.h
> @@ -0,0 +1,29 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * Copyright (c), 2023 Alibaba Cloud
> + * Authors:
> + *   Guo Ren <guo...@linux.alibaba.com>
> + */
> +
> +#ifndef _ASM_RISCV_QSPINLOCK_PARAVIRT_H
> +#define _ASM_RISCV_QSPINLOCK_PARAVIRT_H
> +
> +void pv_wait(u8 *ptr, u8 val);
> +void pv_kick(int cpu);
> +
> +void dummy_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
> +void dummy_queued_spin_unlock(struct qspinlock *lock);
> +
> +DECLARE_STATIC_CALL(pv_queued_spin_lock_slowpath, dummy_queued_spin_lock_slowpath);
> +DECLARE_STATIC_CALL(pv_queued_spin_unlock, dummy_queued_spin_unlock);
> +
> +void __init pv_qspinlock_init(void);
> +
> +static inline bool pv_is_native_spin_unlock(void)
> +{
> +     return false;
> +}
> +
> +void __pv_queued_spin_unlock(struct qspinlock *lock);
> +
> +#endif /* _ASM_RISCV_QSPINLOCK_PARAVIRT_H */
> diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h
> index 6b38d6616f14..ed4253f491fe 100644
> --- a/arch/riscv/include/asm/spinlock.h
> +++ b/arch/riscv/include/asm/spinlock.h
> @@ -39,7 +39,7 @@ static inline bool virt_spin_lock(struct qspinlock *lock)
>  #undef arch_spin_trylock
>  #undef arch_spin_unlock
>  
> -#include <asm-generic/qspinlock.h>
> +#include <asm/qspinlock.h>
>  #include <linux/jump_label.h>
>  
>  #undef arch_spin_is_locked
> diff --git a/arch/riscv/kernel/qspinlock_paravirt.c b/arch/riscv/kernel/qspinlock_paravirt.c
> new file mode 100644
> index 000000000000..85ff5a3ec234
> --- /dev/null
> +++ b/arch/riscv/kernel/qspinlock_paravirt.c
> @@ -0,0 +1,57 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Copyright (c), 2023 Alibaba Cloud
> + * Authors:
> + *   Guo Ren <guo...@linux.alibaba.com>
> + */
> +
> +#include <linux/static_call.h>
> +#include <asm/qspinlock_paravirt.h>
> +#include <asm/sbi.h>
> +
> +void pv_kick(int cpu)
> +{
> +     return;
> +}
> +
> +void pv_wait(u8 *ptr, u8 val)
> +{
> +     unsigned long flags;
> +
> +     if (in_nmi())
> +             return;
> +
> +     local_irq_save(flags);
> +     if (READ_ONCE(*ptr) != val)
> +             goto out;
> +
> +     /* wait_for_interrupt(); */
> +out:
> +     local_irq_restore(flags);
> +}
> +
> +static void native_queued_spin_unlock(struct qspinlock *lock)
> +{
> +     smp_store_release(&lock->locked, 0);
> +}
> +
> +DEFINE_STATIC_CALL(pv_queued_spin_lock_slowpath, native_queued_spin_lock_slowpath);
> +EXPORT_STATIC_CALL(pv_queued_spin_lock_slowpath);
> +
> +DEFINE_STATIC_CALL(pv_queued_spin_unlock, native_queued_spin_unlock);
> +EXPORT_STATIC_CALL(pv_queued_spin_unlock);
> +
> +void __init pv_qspinlock_init(void)
> +{
> +     if (num_possible_cpus() == 1)
> +             return;
> +
> +     if(sbi_get_firmware_id() != SBI_EXT_BASE_IMPL_ID_KVM)

Checks like this seem to be very common in this patchset, and for
someone not very familiar with the SBI firmware IDs they can be hard
to understand.

I mean, patch 8/17 introduces those IDs, and they look to be
incremental (ID == N includes the features of ID < N), but I am not
sure, as I couldn't find much documentation on that.

Then above you test for the ID being different from
SBI_EXT_BASE_IMPL_ID_KVM. But if the IDs really are incremental and a
newer implementation lands, that newer implementation will also return
early, because it still passes the != test even though it presumably
supports the same features.

I am not sure whether the above is right, but it's all I could infer
without documentation.

Well, my point is: this is hard to understand and review as-is, so it
would be nice to have a helper macro like this used instead:

#define sbi_fw_implements_kvm() \
        (sbi_get_firmware_id() >= SBI_EXT_BASE_IMPL_ID_KVM)

if (!sbi_fw_implements_kvm())
        return;
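
To make the difference concrete, here is a small standalone sketch
(with made-up ID values, assuming the incremental numbering described
above; these are not the real definitions from patch 8/17) showing how
the two checks diverge once an ID newer than KVM's lands:

#include <stdio.h>

/* Hypothetical, incrementally numbered implementation IDs. */
#define SBI_EXT_BASE_IMPL_ID_KVM	3
#define SBI_EXT_BASE_IMPL_ID_NEWER	4	/* future, still KVM-capable */

int main(void)
{
	int id = SBI_EXT_BASE_IMPL_ID_NEWER;

	/* The check in the patch: a newer ID bails out early. */
	if (id != SBI_EXT_BASE_IMPL_ID_KVM)
		printf("'!=' check: pv qspinlocks stay disabled\n");

	/* The suggested helper: a newer ID keeps them enabled. */
	if (id >= SBI_EXT_BASE_IMPL_ID_KVM)
		printf("'>=' check: pv qspinlocks get enabled\n");

	return 0;
}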

What do you think?

Other than that, LGTM.

Thanks!
Leo

> +             return;
> +
> +     pr_info("PV qspinlocks enabled\n");
> +     __pv_init_lock_hash();
> +
> +     static_call_update(pv_queued_spin_lock_slowpath, __pv_queued_spin_lock_slowpath);
> +     static_call_update(pv_queued_spin_unlock, __pv_queued_spin_unlock);
> +}
> diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
> index c57d15b05160..88690751f2ee 100644
> --- a/arch/riscv/kernel/setup.c
> +++ b/arch/riscv/kernel/setup.c
> @@ -321,6 +321,10 @@ static void __init riscv_spinlock_init(void)
>  #ifdef CONFIG_QUEUED_SPINLOCKS
>       virt_spin_lock_init();
>  #endif
> +
> +#ifdef CONFIG_PARAVIRT_SPINLOCKS
> +     pv_qspinlock_init();
> +#endif
>  }
>  
>  extern void __init init_rt_signal_env(void);
> -- 
> 2.36.1
> 
