Re: [PATCH v2 1/1] s390/spinlock: Provide vcpu_is_preempted

2016-10-19 Thread Heiko Carstens
On Wed, Oct 19, 2016 at 08:56:36AM +0200, Christian Borntraeger wrote:
> On 09/29/2016 05:51 PM, Christian Borntraeger wrote:
> > this implements the s390 backend for commit
> > "kernel/sched: introduce vcpu preempted check interface"
> > by reworking the existing smp_vcpu_scheduled into
> > arch_vcpu_is_preempted. We can then also get rid of the
> > local cpu_is_preempted function by moving the
> > CIF_ENABLED_WAIT test into arch_vcpu_is_preempted.
> > 
> > Signed-off-by: Christian Borntraeger 
> 
> 
> Martin, Peter,
> 
> I think we could go with the patch as is. In other words not providing
> arch_vcpu_is_preempted for !CONFIG_SMP.
> 
> This will result in compile errors if code does spinning or yielding for
> non-SMP kernels - which does not make sense to me, so this might actually
> be a nice indicator.
> If you prefer the !CONFIG_SMP implementation let me know and I will respin.

...but I do prefer an implementation for !CONFIG_SMP. I'm tired of fixing
silly compile errors that only happen on s390.
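
For context, the generic side of the "vcpu preempted check interface" patch
only supplies a fallback when an architecture defines nothing at all; roughly
(a sketch of the idea, not a verbatim quote of Xinhui's series):

	/* Generic fallback, sketched: if the architecture does not override
	 * vcpu_is_preempted(), optimistic spinning code simply sees
	 * "not preempted" and behaves as it does today. */
	#ifndef vcpu_is_preempted
	# define vcpu_is_preempted(cpu)	false
	#endif

The s390 spinlock.h hunk in the patch defines vcpu_is_preempted
unconditionally, while arch_vcpu_is_preempted() itself only exists in the SMP
build of smp.c, so a !CONFIG_SMP kernel whose code happens to spin on it would
fail to build - exactly the kind of s390-only breakage meant here.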



Re: [PATCH v2 1/1] s390/spinlock: Provide vcpu_is_preempted

2016-10-19 Thread Christian Borntraeger
On 09/29/2016 05:51 PM, Christian Borntraeger wrote:
> this implements the s390 backend for commit
> "kernel/sched: introduce vcpu preempted check interface"
> by reworking the existing smp_vcpu_scheduled into
> arch_vcpu_is_preempted. We can then also get rid of the
> local cpu_is_preempted function by moving the
> CIF_ENABLED_WAIT test into arch_vcpu_is_preempted.
> 
> Signed-off-by: Christian Borntraeger 


Martin, Peter,

I think we could go with the patch as is. In other words not providing
arch_vcpu_is_preempted for !CONFIG_SMP.

This will result in compile errors if code does spinning or yielding for
non-SMP kernels - which does not make sense to me, so this might actually
be a nice indicator.
If you prefer the !CONFIG_SMP implementation let me know and I will respin.
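
To make the "spinning or yielding" point concrete, the callers in Xinhui's
series look roughly like the following owner-spin loop (a hypothetical sketch,
not a function copied from the series; note that task_struct::on_cpu only
exists on SMP builds, which is why UP kernels have no real user):

	/* Hypothetical sketch of a spin-on-owner loop using the new hook.
	 * In a guest, spinning while the lock owner's vCPU is preempted only
	 * burns cycles, so the loop gives up early and blocks instead. */
	static bool keep_spinning_on(struct task_struct *owner)
	{
		while (READ_ONCE(owner->on_cpu)) {
			if (need_resched() || vcpu_is_preempted(task_cpu(owner)))
				return false;	/* stop spinning */
			cpu_relax();
		}
		return true;	/* owner stopped running; try to take the lock */
	}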

In any case, Martin, if the patch is ok for you, can you ack it, so that Peter
can take that patch together with Pan Xinhui's series?

> ---
>  arch/s390/include/asm/spinlock.h |  3 +++
>  arch/s390/kernel/smp.c   |  9 +++--
>  arch/s390/lib/spinlock.c | 25 -
>  3 files changed, 18 insertions(+), 19 deletions(-)
> 
> diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
> index 63ebf37..e16e02f 100644
> --- a/arch/s390/include/asm/spinlock.h
> +++ b/arch/s390/include/asm/spinlock.h
> @@ -21,6 +21,9 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
>   return __sync_bool_compare_and_swap(lock, old, new);
>  }
> 
> +bool arch_vcpu_is_preempted(int cpu);
> +#define vcpu_is_preempted arch_vcpu_is_preempted
> +
>  /*
>   * Simple spin lock operations.  There are two variants, one clears IRQ's
>   * on the local processor, one does not.
> diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
> index 7b89a75..4aadd16 100644
> --- a/arch/s390/kernel/smp.c
> +++ b/arch/s390/kernel/smp.c
> @@ -376,10 +376,15 @@ int smp_find_processor_id(u16 address)
>   return -1;
>  }
> 
> -int smp_vcpu_scheduled(int cpu)
> +bool arch_vcpu_is_preempted(int cpu)
>  {
> - return pcpu_running(pcpu_devices + cpu);
> + if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
> + return false;
> + if (pcpu_running(pcpu_devices + cpu))
> + return false;
> + return true;
>  }
> +EXPORT_SYMBOL(arch_vcpu_is_preempted);
> 
>  void smp_yield_cpu(int cpu)
>  {
> diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
> index e5f50a7..e48a48e 100644
> --- a/arch/s390/lib/spinlock.c
> +++ b/arch/s390/lib/spinlock.c
> @@ -37,15 +37,6 @@ static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
>   asm(".insn rsy,0xeb22,%0,0,%1" : : "d" (old), "Q" (*lock));
>  }
> 
> -static inline int cpu_is_preempted(int cpu)
> -{
> - if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
> - return 0;
> - if (smp_vcpu_scheduled(cpu))
> - return 0;
> - return 1;
> -}
> -
>  void arch_spin_lock_wait(arch_spinlock_t *lp)
>  {
>   unsigned int cpu = SPINLOCK_LOCKVAL;
> @@ -62,7 +53,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
>   continue;
>   }
>   /* First iteration: check if the lock owner is running. */
> - if (first_diag && cpu_is_preempted(~owner)) {
> + if (first_diag && arch_vcpu_is_preempted(~owner)) {
>   smp_yield_cpu(~owner);
>   first_diag = 0;
>   continue;
> @@ -81,7 +72,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
>* yield the CPU unconditionally. For LPAR rely on the
>* sense running status.
>*/
> - if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
> + if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
>   smp_yield_cpu(~owner);
>   first_diag = 0;
>   }
> @@ -108,7 +99,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
>   continue;
>   }
>   /* Check if the lock owner is running. */
> - if (first_diag && cpu_is_preempted(~owner)) {
> + if (first_diag && arch_vcpu_is_preempted(~owner)) {
>   smp_yield_cpu(~owner);
>   first_diag = 0;
>   continue;
> @@ -127,7 +118,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
>* yield the CPU unconditionally. For LPAR rely on the
>* sense running status.
>*/
> - if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
> + if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
>   smp_yield_cpu(~owner);
>   first_diag = 0;
>   }
> @@ -165,7 +156,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
>   owner = 0;
>   while 

Re: [PATCH v2 1/1] s390/spinlock: Provide vcpu_is_preempted

2016-10-04 Thread Christian Borntraeger
On 09/30/2016 08:35 AM, Pan Xinhui wrote:
> 
> 
> On 2016/9/30 13:52, Boqun Feng wrote:
>> On Fri, Sep 30, 2016 at 12:49:52PM +0800, Pan Xinhui wrote:
>>>
>>>
>>> On 2016/9/29 23:51, Christian Borntraeger wrote:
 this implements the s390 backend for commit
 "kernel/sched: introduce vcpu preempted check interface"
 by reworking the existing smp_vcpu_scheduled into
 arch_vcpu_is_preempted. We can then also get rid of the
 local cpu_is_preempted function by moving the
 CIF_ENABLED_WAIT test into arch_vcpu_is_preempted.

 Signed-off-by: Christian Borntraeger 
 ---
>>>
>>> hi, Christian
>>> thanks for your patch!
>>>
  arch/s390/include/asm/spinlock.h |  3 +++
  arch/s390/kernel/smp.c   |  9 +++--
  arch/s390/lib/spinlock.c | 25 -
  3 files changed, 18 insertions(+), 19 deletions(-)

 diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
 index 63ebf37..e16e02f 100644
 --- a/arch/s390/include/asm/spinlock.h
 +++ b/arch/s390/include/asm/spinlock.h
 @@ -21,6 +21,9 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
  return __sync_bool_compare_and_swap(lock, old, new);
  }

 +bool arch_vcpu_is_preempted(int cpu);
 +#define vcpu_is_preempted arch_vcpu_is_preempted
 +
  /*
   * Simple spin lock operations.  There are two variants, one clears IRQ's
   * on the local processor, one does not.
 diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
 index 7b89a75..4aadd16 100644
 --- a/arch/s390/kernel/smp.c
 +++ b/arch/s390/kernel/smp.c
 @@ -376,10 +376,15 @@ int smp_find_processor_id(u16 address)
  return -1;
  }

 -int smp_vcpu_scheduled(int cpu)
>>> root@ltcalpine2-lp13:~/linux# git grep -wn smp_vcpu_scheduled arch/s390/
>>> arch/s390/include/asm/smp.h:34:extern int smp_vcpu_scheduled(int cpu);
>>> arch/s390/include/asm/smp.h:56:static inline int smp_vcpu_scheduled(int cpu) { return 1; }
>>> arch/s390/kernel/smp.c:371:int smp_vcpu_scheduled(int cpu)
>>> arch/s390/lib/spinlock.c:44:if (smp_vcpu_scheduled(cpu))
>>>
 +bool arch_vcpu_is_preempted(int cpu)
  {
 -return pcpu_running(pcpu_devices + cpu);
 +if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
 +return false;
 +if (pcpu_running(pcpu_devices + cpu))
 +return false;
>>> I saw that smp_vcpu_scheduled() always returns true on a !SMP system.
>>>
>>> maybe we can do something similar, like below
>>>
>>> #ifndef CONFIG_SMP
>>> static inline bool arch_vcpu_is_preempted(int cpu) { return !test_cpu_flag_of(CIF_ENABLED_WAIT, cpu); }
>>> #else
>>> ...
>>>
>>> but I can't help thinking that if this is a !SMP system, maybe we could just do
>>> #ifndef CONFIG_SMP
>>> static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
>>> #else
>>
>> Why do we need a vcpu_is_preempted() implementation for UP? Where will
>> you use it?
>>
> yep, I also wonder about that :)
> 
> But there is a definition of smp_vcpu_scheduled() for the !SMP kernel.
> So I am a little worried that some code includes this spinlock.h for UP
> kernels as well.
> 
> Hi, Christian
> Could you help confirm that your patch works on UP? :)

My patch as is seems to work fine for !SMP. So it looks like the extra define
is not necessary and we could simply go with v2.





Re: [PATCH v2 1/1] s390/spinlock: Provide vcpu_is_preempted

2016-09-30 Thread Christian Borntraeger
On 09/30/2016 06:49 AM, Pan Xinhui wrote:

> 
> but I can't help thinking that if this is a !SMP system, maybe we could just do
> #ifndef CONFIG_SMP
> static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
> #else

Yes, I will add that to v3. Thanks for spotting.
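
For reference, folding that suggestion in would make the spinlock.h hunk look
something like this (a sketch of the direction only, not the actual v3 diff):

	#ifdef CONFIG_SMP
	bool arch_vcpu_is_preempted(int cpu);
	#else
	/* UP: there is no other vCPU whose state we could sensibly query. */
	static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
	#endif
	#define vcpu_is_preempted arch_vcpu_is_preempted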



Re: [PATCH v2 1/1] s390/spinlock: Provide vcpu_is_preempted

2016-09-30 Thread Pan Xinhui



On 2016/9/30 13:52, Boqun Feng wrote:

On Fri, Sep 30, 2016 at 12:49:52PM +0800, Pan Xinhui wrote:



On 2016/9/29 23:51, Christian Borntraeger wrote:

this implements the s390 backend for commit
"kernel/sched: introduce vcpu preempted check interface"
by reworking the existing smp_vcpu_scheduled into
arch_vcpu_is_preempted. We can then also get rid of the
local cpu_is_preempted function by moving the
CIF_ENABLED_WAIT test into arch_vcpu_is_preempted.

Signed-off-by: Christian Borntraeger 
---


hi, Christian
thanks for your patch!


 arch/s390/include/asm/spinlock.h |  3 +++
 arch/s390/kernel/smp.c   |  9 +++--
 arch/s390/lib/spinlock.c | 25 -
 3 files changed, 18 insertions(+), 19 deletions(-)

diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 63ebf37..e16e02f 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -21,6 +21,9 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
return __sync_bool_compare_and_swap(lock, old, new);
 }

+bool arch_vcpu_is_preempted(int cpu);
+#define vcpu_is_preempted arch_vcpu_is_preempted
+
 /*
  * Simple spin lock operations.  There are two variants, one clears IRQ's
  * on the local processor, one does not.
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 7b89a75..4aadd16 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -376,10 +376,15 @@ int smp_find_processor_id(u16 address)
return -1;
 }

-int smp_vcpu_scheduled(int cpu)

root@ltcalpine2-lp13:~/linux# git grep -wn smp_vcpu_scheduled arch/s390/
arch/s390/include/asm/smp.h:34:extern int smp_vcpu_scheduled(int cpu);
arch/s390/include/asm/smp.h:56:static inline int smp_vcpu_scheduled(int cpu) { return 1; }
arch/s390/kernel/smp.c:371:int smp_vcpu_scheduled(int cpu)
arch/s390/lib/spinlock.c:44:if (smp_vcpu_scheduled(cpu))


+bool arch_vcpu_is_preempted(int cpu)
 {
-   return pcpu_running(pcpu_devices + cpu);
+   if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
+   return false;
+   if (pcpu_running(pcpu_devices + cpu))
+   return false;

I saw that smp_vcpu_scheduled() always returns true on a !SMP system.

maybe we can do something similar, like below

#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return !test_cpu_flag_of(CIF_ENABLED_WAIT, cpu); }
#else
...

but I can't help thinking that if this is a !SMP system, maybe we could just do
#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else


Why do we need a vcpu_is_preempted() implementation for UP? Where will
you use it?


yep, I also wonder about that :)

But there is a definition of smp_vcpu_scheduled() for the !SMP kernel.
So I am a little worried that some code includes this spinlock.h for UP
kernels as well.

Hi, Christian
Could you help confirm that your patch works on UP? :)

thanks
xinhui


Regards,
Boqun


...


thanks
xinhui


+   return true;
 }
+EXPORT_SYMBOL(arch_vcpu_is_preempted);

 void smp_yield_cpu(int cpu)
 {
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index e5f50a7..e48a48e 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -37,15 +37,6 @@ static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
asm(".insn rsy,0xeb22,%0,0,%1" : : "d" (old), "Q" (*lock));
 }

-static inline int cpu_is_preempted(int cpu)
-{
-   if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
-   return 0;
-   if (smp_vcpu_scheduled(cpu))
-   return 0;
-   return 1;
-}
-
 void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
unsigned int cpu = SPINLOCK_LOCKVAL;
@@ -62,7 +53,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
continue;
}
/* First iteration: check if the lock owner is running. */
-   if (first_diag && cpu_is_preempted(~owner)) {
+   if (first_diag && arch_vcpu_is_preempted(~owner)) {
smp_yield_cpu(~owner);
first_diag = 0;
continue;
@@ -81,7 +72,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 * yield the CPU unconditionally. For LPAR rely on the
 * sense running status.
 */
-   if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
+   if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
smp_yield_cpu(~owner);
first_diag = 0;
}
@@ -108,7 +99,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
continue;
}
/* Check if the lock owner is running. */
-   if (first_diag && cpu_is_preempted(~owner)) {
+   if (first_diag && arch_vcpu_is_preempted(~owner)) 

Re: [PATCH v2 1/1] s390/spinlock: Provide vcpu_is_preempted

2016-09-29 Thread Boqun Feng
On Fri, Sep 30, 2016 at 12:49:52PM +0800, Pan Xinhui wrote:
> 
> 
> On 2016/9/29 23:51, Christian Borntraeger wrote:
> > this implements the s390 backend for commit
> > "kernel/sched: introduce vcpu preempted check interface"
> > by reworking the existing smp_vcpu_scheduled into
> > arch_vcpu_is_preempted. We can then also get rid of the
> > local cpu_is_preempted function by moving the
> > CIF_ENABLED_WAIT test into arch_vcpu_is_preempted.
> > 
> > Signed-off-by: Christian Borntraeger 
> > ---
> 
> hi, Christian
>   thanks for your patch!
> 
> >  arch/s390/include/asm/spinlock.h |  3 +++
> >  arch/s390/kernel/smp.c   |  9 +++--
> >  arch/s390/lib/spinlock.c | 25 -
> >  3 files changed, 18 insertions(+), 19 deletions(-)
> > 
> > diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
> > index 63ebf37..e16e02f 100644
> > --- a/arch/s390/include/asm/spinlock.h
> > +++ b/arch/s390/include/asm/spinlock.h
> > @@ -21,6 +21,9 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
> > return __sync_bool_compare_and_swap(lock, old, new);
> >  }
> > 
> > +bool arch_vcpu_is_preempted(int cpu);
> > +#define vcpu_is_preempted arch_vcpu_is_preempted
> > +
> >  /*
> >   * Simple spin lock operations.  There are two variants, one clears IRQ's
> >   * on the local processor, one does not.
> > diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
> > index 7b89a75..4aadd16 100644
> > --- a/arch/s390/kernel/smp.c
> > +++ b/arch/s390/kernel/smp.c
> > @@ -376,10 +376,15 @@ int smp_find_processor_id(u16 address)
> > return -1;
> >  }
> > 
> > -int smp_vcpu_scheduled(int cpu)
> root@ltcalpine2-lp13:~/linux# git grep -wn smp_vcpu_scheduled arch/s390/
> arch/s390/include/asm/smp.h:34:extern int smp_vcpu_scheduled(int cpu);
> arch/s390/include/asm/smp.h:56:static inline int smp_vcpu_scheduled(int cpu) { return 1; }
> arch/s390/kernel/smp.c:371:int smp_vcpu_scheduled(int cpu)
> arch/s390/lib/spinlock.c:44:if (smp_vcpu_scheduled(cpu))
> 
> > +bool arch_vcpu_is_preempted(int cpu)
> >  {
> > -   return pcpu_running(pcpu_devices + cpu);
> > +   if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
> > +   return false;
> > +   if (pcpu_running(pcpu_devices + cpu))
> > +   return false;
> I saw that smp_vcpu_scheduled() always returns true on a !SMP system.
> 
> maybe we can do something similar, like below
> 
> #ifndef CONFIG_SMP
> static inline bool arch_vcpu_is_preempted(int cpu) { return !test_cpu_flag_of(CIF_ENABLED_WAIT, cpu); }
> #else
> ...
> 
> but I can't help thinking that if this is a !SMP system, maybe we could just do
> #ifndef CONFIG_SMP
> static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
> #else

Why do we need a vcpu_is_preempted() implementation for UP? Where will
you use it?

Regards,
Boqun

> ...
> 
> 
> thanks
> xinhui
> 
> > +   return true;
> >  }
> > +EXPORT_SYMBOL(arch_vcpu_is_preempted);
> > 
> >  void smp_yield_cpu(int cpu)
> >  {
> > diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
> > index e5f50a7..e48a48e 100644
> > --- a/arch/s390/lib/spinlock.c
> > +++ b/arch/s390/lib/spinlock.c
> > @@ -37,15 +37,6 @@ static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
> > asm(".insn rsy,0xeb22,%0,0,%1" : : "d" (old), "Q" (*lock));
> >  }
> > 
> > -static inline int cpu_is_preempted(int cpu)
> > -{
> > -   if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
> > -   return 0;
> > -   if (smp_vcpu_scheduled(cpu))
> > -   return 0;
> > -   return 1;
> > -}
> > -
> >  void arch_spin_lock_wait(arch_spinlock_t *lp)
> >  {
> > unsigned int cpu = SPINLOCK_LOCKVAL;
> > @@ -62,7 +53,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
> > continue;
> > }
> > /* First iteration: check if the lock owner is running. */
> > -   if (first_diag && cpu_is_preempted(~owner)) {
> > +   if (first_diag && arch_vcpu_is_preempted(~owner)) {
> > smp_yield_cpu(~owner);
> > first_diag = 0;
> > continue;
> > @@ -81,7 +72,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
> >  * yield the CPU unconditionally. For LPAR rely on the
> >  * sense running status.
> >  */
> > -   if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
> > +   if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
> > smp_yield_cpu(~owner);
> > first_diag = 0;
> > }
> > @@ -108,7 +99,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
> > continue;
> > }
> > /* Check if the lock owner is running. */
> > -   if (first_diag && cpu_is_preempted(~owner)) {
> > +   if (first_diag && arch_vcpu_is_preempted(~owner)) {
> >

Re: [PATCH v2 1/1] s390/spinlock: Provide vcpu_is_preempted

2016-09-29 Thread Pan Xinhui



On 2016/9/29 23:51, Christian Borntraeger wrote:

this implements the s390 backend for commit
"kernel/sched: introduce vcpu preempted check interface"
by reworking the existing smp_vcpu_scheduled into
arch_vcpu_is_preempted. We can then also get rid of the
local cpu_is_preempted function by moving the
CIF_ENABLED_WAIT test into arch_vcpu_is_preempted.

Signed-off-by: Christian Borntraeger 
---


hi, Christian
thanks for your patch!


 arch/s390/include/asm/spinlock.h |  3 +++
 arch/s390/kernel/smp.c   |  9 +++--
 arch/s390/lib/spinlock.c | 25 -
 3 files changed, 18 insertions(+), 19 deletions(-)

diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 63ebf37..e16e02f 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -21,6 +21,9 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
return __sync_bool_compare_and_swap(lock, old, new);
 }

+bool arch_vcpu_is_preempted(int cpu);
+#define vcpu_is_preempted arch_vcpu_is_preempted
+
 /*
  * Simple spin lock operations.  There are two variants, one clears IRQ's
  * on the local processor, one does not.
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 7b89a75..4aadd16 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -376,10 +376,15 @@ int smp_find_processor_id(u16 address)
return -1;
 }

-int smp_vcpu_scheduled(int cpu)
 
root@ltcalpine2-lp13:~/linux# git grep -wn smp_vcpu_scheduled arch/s390/

arch/s390/include/asm/smp.h:34:extern int smp_vcpu_scheduled(int cpu);
arch/s390/include/asm/smp.h:56:static inline int smp_vcpu_scheduled(int cpu) { return 1; }
arch/s390/kernel/smp.c:371:int smp_vcpu_scheduled(int cpu)
arch/s390/lib/spinlock.c:44:if (smp_vcpu_scheduled(cpu))


+bool arch_vcpu_is_preempted(int cpu)
 {
-   return pcpu_running(pcpu_devices + cpu);
+   if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
+   return false;
+   if (pcpu_running(pcpu_devices + cpu))
+   return false;

I saw that smp_vcpu_scheduled() always returns true on a !SMP system.

maybe we can do something similar, like below

#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return !test_cpu_flag_of(CIF_ENABLED_WAIT, cpu); }
#else
...

but I can't help thinking that if this is a !SMP system, maybe we could just do
#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
...


thanks
xinhui


+   return true;
 }
+EXPORT_SYMBOL(arch_vcpu_is_preempted);

 void smp_yield_cpu(int cpu)
 {
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index e5f50a7..e48a48e 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -37,15 +37,6 @@ static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
asm(".insn rsy,0xeb22,%0,0,%1" : : "d" (old), "Q" (*lock));
 }

-static inline int cpu_is_preempted(int cpu)
-{
-   if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
-   return 0;
-   if (smp_vcpu_scheduled(cpu))
-   return 0;
-   return 1;
-}
-
 void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
unsigned int cpu = SPINLOCK_LOCKVAL;
@@ -62,7 +53,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
continue;
}
/* First iteration: check if the lock owner is running. */
-   if (first_diag && cpu_is_preempted(~owner)) {
+   if (first_diag && arch_vcpu_is_preempted(~owner)) {
smp_yield_cpu(~owner);
first_diag = 0;
continue;
@@ -81,7 +72,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 * yield the CPU unconditionally. For LPAR rely on the
 * sense running status.
 */
-   if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
+   if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
smp_yield_cpu(~owner);
first_diag = 0;
}
@@ -108,7 +99,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
continue;
}
/* Check if the lock owner is running. */
-   if (first_diag && cpu_is_preempted(~owner)) {
+   if (first_diag && arch_vcpu_is_preempted(~owner)) {
smp_yield_cpu(~owner);
first_diag = 0;
continue;
@@ -127,7 +118,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 * yield the CPU unconditionally. For LPAR rely on the
 * sense running status.
 */
-   if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
+   if (!MACHINE_IS_LPAR ||