Re: [PATCH 6/8] powerpc/pseries: implement paravirt qspinlocks for SPLPAR

2020-07-02 Thread Waiman Long

On 7/2/20 12:15 PM, kernel test robot wrote:

Hi Nicholas,

I love your patch! Yet something to improve:

[auto build test ERROR on powerpc/next]
[also build test ERROR on tip/locking/core v5.8-rc3 next-20200702]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use  as documented in
https://git-scm.com/docs/git-format-patch]

url:
https://github.com/0day-ci/linux/commits/Nicholas-Piggin/powerpc-queued-spinlocks-and-rwlocks/20200702-155158
base:   https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git next
config: powerpc-allyesconfig (attached as .config)
compiler: powerpc64-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=powerpc

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot 

All errors (new ones prefixed by >>):

   kernel/locking/lock_events.c:61:16: warning: no previous prototype for 'lockevent_read' [-Wmissing-prototypes]
      61 | ssize_t __weak lockevent_read(struct file *file, char __user *user_buf,
         |                ^~~~~~~~~~~~~~
   kernel/locking/lock_events.c: In function 'skip_lockevent':
>> kernel/locking/lock_events.c:126:12: error: implicit declaration of function 'pv_is_native_spin_unlock' [-Werror=implicit-function-declaration]
     126 |   pv_on = !pv_is_native_spin_unlock();
         |           ^~~~~~~~~~~~~~~~~~~~~~~~
   cc1: some warnings being treated as errors

vim +/pv_is_native_spin_unlock +126 kernel/locking/lock_events.c


I think you will need to add the following into 
arch/powerpc/include/asm/qspinlock_paravirt.h:


static inline bool pv_is_native_spin_unlock(void)
{
    return !is_shared_processor();
}
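
For reference, a minimal sketch of how that helper could sit in the header;
the include-guard names and the #include are my assumptions for illustration,
not taken from Nicholas' patch:

#ifndef _ASM_POWERPC_QSPINLOCK_PARAVIRT_H
#define _ASM_POWERPC_QSPINLOCK_PARAVIRT_H

#include <asm/paravirt.h>	/* is_shared_processor() */

/* lock_events.c uses this to tell whether the PV unlock path is active */
static inline bool pv_is_native_spin_unlock(void)
{
	return !is_shared_processor();
}

#endif /* _ASM_POWERPC_QSPINLOCK_PARAVIRT_H */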

Cheers,
Longman



Re: [PATCH 6/8] powerpc/pseries: implement paravirt qspinlocks for SPLPAR

2020-07-02 Thread Waiman Long

On 7/2/20 3:48 AM, Nicholas Piggin wrote:

Signed-off-by: Nicholas Piggin 
---
  arch/powerpc/include/asm/paravirt.h   | 23 
  arch/powerpc/include/asm/qspinlock.h  | 55 +++
  arch/powerpc/include/asm/qspinlock_paravirt.h |  5 ++
  arch/powerpc/platforms/pseries/Kconfig|  5 ++
  arch/powerpc/platforms/pseries/setup.c|  6 +-
  include/asm-generic/qspinlock.h   |  2 +
  6 files changed, 95 insertions(+), 1 deletion(-)
  create mode 100644 arch/powerpc/include/asm/qspinlock_paravirt.h

diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
index 7a8546660a63..5fae9dfa6fe9 100644
--- a/arch/powerpc/include/asm/paravirt.h
+++ b/arch/powerpc/include/asm/paravirt.h
@@ -29,6 +29,16 @@ static inline void yield_to_preempted(int cpu, u32 yield_count)
  {
 	plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
  }
+
+static inline void prod_cpu(int cpu)
+{
+   plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
+}
+
+static inline void yield_to_any(void)
+{
+   plpar_hcall_norets(H_CONFER, -1, 0);
+}
  #else
  static inline bool is_shared_processor(void)
  {
@@ -45,6 +55,19 @@ static inline void yield_to_preempted(int cpu, u32 yield_count)
  {
 	___bad_yield_to_preempted(); /* This would be a bug */
  }
+
+extern void ___bad_yield_to_any(void);
+static inline void yield_to_any(void)
+{
+   ___bad_yield_to_any(); /* This would be a bug */
+}
+
+extern void ___bad_prod_cpu(void);
+static inline void prod_cpu(int cpu)
+{
+   ___bad_prod_cpu(); /* This would be a bug */
+}
+
  #endif
  
  #define vcpu_is_preempted vcpu_is_preempted

diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h
index f84da77b6bb7..997a9a32df77 100644
--- a/arch/powerpc/include/asm/qspinlock.h
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -3,9 +3,36 @@
  #define _ASM_POWERPC_QSPINLOCK_H
  
  #include 

+#include 
  
  #define _Q_PENDING_LOOPS	(1 << 9) /* not tuned */
  
+#ifdef CONFIG_PARAVIRT_SPINLOCKS

+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+
+static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+   if (!is_shared_processor())
+   native_queued_spin_lock_slowpath(lock, val);
+   else
+   __pv_queued_spin_lock_slowpath(lock, val);
+}


You may need to match the use of __pv_queued_spin_lock_slowpath() with 
the corresponding __pv_queued_spin_unlock(), e.g.


#define queued_spin_unlock queued_spin_unlock
static inline void queued_spin_unlock(struct qspinlock *lock)
{
	if (!is_shared_processor())
		smp_store_release(&lock->locked, 0);
	else
		__pv_queued_spin_unlock(lock);
}

Otherwise, pv_kick() will never be called.
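
To spell that out: pv_kick() is only reachable through the PV unlock slow
path. Roughly, the generic code does the following (paraphrased, not the
exact kernel/locking/qspinlock_paravirt.h source):

void __pv_queued_spin_unlock(struct qspinlock *lock)
{
	u8 locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0);

	if (likely(locked == _Q_LOCKED_VAL))
		return;				/* no waiter halted in pv_wait() */

	/* a waiter stored _Q_SLOW_VAL before halting; wake its vCPU */
	__pv_queued_spin_unlock_slowpath(lock, locked);	/* ends in pv_kick() */
}

A plain smp_store_release() in queued_spin_unlock() skips all of this, so a
vCPU sleeping in pv_wait() would never be woken.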

Cheers,
Longman


Re: [PATCH 6/8] powerpc/pseries: implement paravirt qspinlocks for SPLPAR

2020-07-02 Thread kernel test robot
Hi Nicholas,

I love your patch! Yet something to improve:

[auto build test ERROR on powerpc/next]
[also build test ERROR on tip/locking/core v5.8-rc3 next-20200702]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use  as documented in
https://git-scm.com/docs/git-format-patch]

url:
https://github.com/0day-ci/linux/commits/Nicholas-Piggin/powerpc-queued-spinlocks-and-rwlocks/20200702-155158
base:   https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git next
config: powerpc-allyesconfig (attached as .config)
compiler: powerpc64-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=powerpc

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot 

All errors (new ones prefixed by >>):

   kernel/locking/lock_events.c:61:16: warning: no previous prototype for 'lockevent_read' [-Wmissing-prototypes]
      61 | ssize_t __weak lockevent_read(struct file *file, char __user *user_buf,
         |                ^~~~~~~~~~~~~~
   kernel/locking/lock_events.c: In function 'skip_lockevent':
>> kernel/locking/lock_events.c:126:12: error: implicit declaration of function 'pv_is_native_spin_unlock' [-Werror=implicit-function-declaration]
     126 |   pv_on = !pv_is_native_spin_unlock();
         |           ^~~~~~~~~~~~~~~~~~~~~~~~
   cc1: some warnings being treated as errors

vim +/pv_is_native_spin_unlock +126 kernel/locking/lock_events.c

fb346fd9fc081c Waiman Long 2019-04-04   57  
fb346fd9fc081c Waiman Long 2019-04-04   58  /*
fb346fd9fc081c Waiman Long 2019-04-04   59   * The lockevent_read() function can be overridden.
fb346fd9fc081c Waiman Long 2019-04-04   60   */
fb346fd9fc081c Waiman Long 2019-04-04  @61  ssize_t __weak lockevent_read(struct file *file, char __user *user_buf,
fb346fd9fc081c Waiman Long 2019-04-04   62                                size_t count, loff_t *ppos)
fb346fd9fc081c Waiman Long 2019-04-04   63  {
fb346fd9fc081c Waiman Long 2019-04-04   64  	char buf[64];
fb346fd9fc081c Waiman Long 2019-04-04   65  	int cpu, id, len;
fb346fd9fc081c Waiman Long 2019-04-04   66  	u64 sum = 0;
fb346fd9fc081c Waiman Long 2019-04-04   67  
fb346fd9fc081c Waiman Long 2019-04-04   68  	/*
fb346fd9fc081c Waiman Long 2019-04-04   69  	 * Get the counter ID stored in file->f_inode->i_private
fb346fd9fc081c Waiman Long 2019-04-04   70  	 */
fb346fd9fc081c Waiman Long 2019-04-04   71  	id = (long)file_inode(file)->i_private;
fb346fd9fc081c Waiman Long 2019-04-04   72  
fb346fd9fc081c Waiman Long 2019-04-04   73  	if (id >= lockevent_num)
fb346fd9fc081c Waiman Long 2019-04-04   74  		return -EBADF;
fb346fd9fc081c Waiman Long 2019-04-04   75  
fb346fd9fc081c Waiman Long 2019-04-04   76  	for_each_possible_cpu(cpu)
fb346fd9fc081c Waiman Long 2019-04-04   77  		sum += per_cpu(lockevents[id], cpu);
fb346fd9fc081c Waiman Long 2019-04-04   78  	len = snprintf(buf, sizeof(buf) - 1, "%llu\n", sum);
fb346fd9fc081c Waiman Long 2019-04-04   79  
fb346fd9fc081c Waiman Long 2019-04-04   80  	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
fb346fd9fc081c Waiman Long 2019-04-04   81  }
fb346fd9fc081c Waiman Long 2019-04-04   82  
fb346fd9fc081c Waiman Long 2019-04-04   83  /*
fb346fd9fc081c Waiman Long 2019-04-04   84   * Function to handle write request
fb346fd9fc081c Waiman Long 2019-04-04   85   *
fb346fd9fc081c Waiman Long 2019-04-04   86   * When idx = reset_cnts, reset all the counts.
fb346fd9fc081c Waiman Long 2019-04-04   87   */
fb346fd9fc081c Waiman Long 2019-04-04   88  static ssize_t lockevent_write(struct file *file, const char __user *user_buf,
fb346fd9fc081c Waiman Long 2019-04-04   89                                 size_t count, loff_t *ppos)
fb346fd9fc081c Waiman Long 2019-04-04   90  {
fb346fd9fc081c Waiman Long 2019-04-04   91  	int cpu;
fb346fd9fc081c Waiman Long 2019-04-04   92  
fb346fd9fc081c Waiman Long 2019-04-04   93  	/*
fb346fd9fc081c Waiman Long 2019-04-04   94  	 * Get the counter ID stored in file->f_inode->i_private
fb346fd9fc081c Waiman Long 2019-04-04   95  	 */
fb346fd9fc081c Waiman Long 2019-04-04   96  	if ((long)file_inode(file)->i_private != LOCKEVENT_reset_cnts)
fb346fd9fc081c Waiman Long 2019-04-04   97  		return count;
fb346fd9fc081c Waiman Long 2019-04-04   98  
fb346fd9fc081c Waiman Long 2019-04-04   99  	for_each_possible_cpu(cpu) {
fb346fd9fc081c Waiman Long 2019-04-04  100  		int i;
fb346fd9fc081c Waiman Long 2019-04-04  101  		unsigned long *ptr = per_cpu_ptr(lockevents, cpu);
fb346fd9fc081c Waiman Long 2019-04-04  102  
fb346fd9fc081c Waiman Long 2019-04-04  103