tree:   https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev.2017.09.17a
head:   0cb8379979ad024d4024bd552bb0287d46a19b0c
commit: 0cb8379979ad024d4024bd552bb0287d46a19b0c [32/32] EXP smp/hotplug,lockdep: Annotate cpuhp_state
config: i386-randconfig-x079-201738 (attached as .config)
compiler: gcc-6 (Debian 6.2.0-3) 6.2.0 20160901
reproduce:
        git checkout 0cb8379979ad024d4024bd552bb0287d46a19b0c
        # save the attached .config to linux build tree
        make ARCH=i386 

All errors (new ones prefixed by >>):

   In file included from include/linux/spinlock_types.h:18:0,
                    from include/linux/spinlock.h:81,
                    from include/linux/wait.h:8,
                    from include/linux/wait_bit.h:7,
                    from include/linux/fs.h:5,
                    from include/linux/proc_fs.h:8,
                    from kernel/cpu.c:6:
   kernel/cpu.c: In function '_cpu_down':
>> kernel/cpu.c:721:6: error: 'cpuhp_state_down_key' undeclared (first use in this function)
        &cpuhp_state_down_key, 0);
         ^
   include/linux/lockdep.h:505:29: note: in definition of macro 'lockdep_init_map'
      do { (void)(name); (void)(key); } while (0)
                                ^~~
   kernel/cpu.c:721:6: note: each undeclared identifier is reported only once for each function it appears in
        &cpuhp_state_down_key, 0);
         ^
   include/linux/lockdep.h:505:29: note: in definition of macro 'lockdep_init_map'
      do { (void)(name); (void)(key); } while (0)
                                ^~~
   kernel/cpu.c: In function '_cpu_up':
>> kernel/cpu.c:837:6: error: 'cpuhp_state_up_key' undeclared (first use in this function)
        &cpuhp_state_up_key, 0);
         ^
   include/linux/lockdep.h:505:29: note: in definition of macro 'lockdep_init_map'
      do { (void)(name); (void)(key); } while (0)
                                ^~~
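
Note that the non-lockdep stub of lockdep_init_map() quoted above (include/linux/lockdep.h:505) still evaluates its key argument via (void)(key), so cpuhp_state_down_key and cpuhp_state_up_key must be declared at the call sites even in a config with lockdep disabled. A minimal sketch of one way to satisfy both references follows; the placement and exact form are assumptions, not the actual patch:

        #include <linux/lockdep.h>

        /*
         * Hypothetical sketch, not the actual patch: declare the two
         * per-direction lock class keys unconditionally in kernel/cpu.c.
         * struct lock_class_key is an empty struct when CONFIG_LOCKDEP=n,
         * so the declarations cost nothing there, while the (void)(key)
         * stub expansion of lockdep_init_map() has a visible identifier
         * to reference.
         */
        static struct lock_class_key cpuhp_state_up_key;
        static struct lock_class_key cpuhp_state_down_key;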

vim +/cpuhp_state_down_key +721 kernel/cpu.c

   703  
   704  /* Requires cpu_add_remove_lock to be held */
   705  static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
   706                             enum cpuhp_state target)
   707  {
   708          struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
   709          int prev_state, ret = 0;
   710  
   711          if (num_online_cpus() == 1)
   712                  return -EBUSY;
   713  
   714          if (!cpu_present(cpu))
   715                  return -EINVAL;
   716  
   717          cpus_write_lock();
   718  
   719          lockdep_reinit_st_done();
   720          lockdep_init_map(&cpuhp_state_lock_map, "cpuhp_state-down",
 > 721                           &cpuhp_state_down_key, 0);
   722  
   723          cpuhp_tasks_frozen = tasks_frozen;
   724  
   725          prev_state = st->state;
   726          st->target = target;
   727          /*
   728           * If the current CPU state is in the range of the AP hotplug thread,
   729           * then we need to kick the thread.
   730           */
   731          if (st->state > CPUHP_TEARDOWN_CPU) {
   732                  ret = cpuhp_kick_ap_work(cpu);
   733                  /*
   734                   * The AP side has done the error rollback already. Just
   735                   * return the error code..
   736                   */
   737                  if (ret)
   738                          goto out;
   739  
   740                  /*
   741                   * We might have stopped still in the range of the AP hotplug
   742                   * thread. Nothing to do anymore.
   743                   */
   744                  if (st->state > CPUHP_TEARDOWN_CPU)
   745                          goto out;
   746          }
   747          /*
   748           * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
   749           * to do the further cleanups.
   750           */
   751          ret = cpuhp_down_callbacks(cpu, st, target);
   752          if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
   753                  st->target = prev_state;
   754                  st->rollback = true;
   755                  cpuhp_kick_ap_work(cpu);
   756          }
   757  
   758  out:
   759          cpus_write_unlock();
   760          return ret;
   761  }
   762  
   763  static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
   764  {
   765          int err;
   766  
   767          cpu_maps_update_begin();
   768  
   769          if (cpu_hotplug_disabled) {
   770                  err = -EBUSY;
   771                  goto out;
   772          }
   773  
   774          err = _cpu_down(cpu, 0, target);
   775  
   776  out:
   777          cpu_maps_update_done();
   778          return err;
   779  }
   780  int cpu_down(unsigned int cpu)
   781  {
   782          return do_cpu_down(cpu, CPUHP_OFFLINE);
   783  }
   784  EXPORT_SYMBOL(cpu_down);
   785  
   786  #else
   787  #define takedown_cpu            NULL
   788  #endif /*CONFIG_HOTPLUG_CPU*/
   789  
   790  /**
   791   * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
   792   * @cpu: cpu that just started
   793   *
   794   * It must be called by the arch code on the new cpu, before the new cpu
   795   * enables interrupts and before the "boot" cpu returns from __cpu_up().
   796   */
   797  void notify_cpu_starting(unsigned int cpu)
   798  {
   799          struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
   800          enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
   801  
   802          rcu_cpu_starting(cpu);  /* Enables RCU usage on this CPU. */
   803          while (st->state < target) {
   804                  st->state++;
   805                  cpuhp_invoke_callback(cpu, st->state, true, NULL);
   806          }
   807  }
   808  
   809  /*
   810   * Called from the idle task. Wake up the controlling task which brings the
   811   * stopper and the hotplug thread of the upcoming CPU up and then delegates
   812   * the rest of the online bringup to the hotplug thread.
   813   */
   814  void cpuhp_online_idle(enum cpuhp_state state)
   815  {
   816          struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
   817  
   818          /* Happens for the boot cpu */
   819          if (state != CPUHP_AP_ONLINE_IDLE)
   820                  return;
   821  
   822          st->state = CPUHP_AP_ONLINE_IDLE;
   823          complete(&st->done);
   824  }
   825  
   826  /* Requires cpu_add_remove_lock to be held */
   827  static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
   828  {
   829          struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
   830          struct task_struct *idle;
   831          int ret = 0;
   832  
   833          cpus_write_lock();
   834  
   835          lockdep_reinit_st_done();
   836          lockdep_init_map(&cpuhp_state_lock_map, "cpuhp_state-up",
 > 837                           &cpuhp_state_up_key, 0);
   838  
   839          if (!cpu_present(cpu)) {
   840                  ret = -EINVAL;
   841                  goto out;
   842          }
   843  
   844          /*
   845           * The caller of do_cpu_up might have raced with another
   846           * caller. Ignore it for now.
   847           */
   848          if (st->state >= target)
   849                  goto out;
   850  
   851          if (st->state == CPUHP_OFFLINE) {
   852                  /* Let it fail before we try to bring the cpu up */
   853                  idle = idle_thread_get(cpu);
   854                  if (IS_ERR(idle)) {
   855                          ret = PTR_ERR(idle);
   856                          goto out;
   857                  }
   858          }
   859  
   860          cpuhp_tasks_frozen = tasks_frozen;
   861  
   862          st->target = target;
   863          /*
   864           * If the current CPU state is in the range of the AP hotplug thread,
   865           * then we need to kick the thread once more.
   866           */
   867          if (st->state > CPUHP_BRINGUP_CPU) {
   868                  ret = cpuhp_kick_ap_work(cpu);
   869                  /*
   870                   * The AP side has done the error rollback already. Just
   871                   * return the error code..
   872                   */
   873                  if (ret)
   874                          goto out;
   875          }
   876  
   877          /*
   878           * Try to reach the target state. We max out on the BP at
   879           * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
   880           * responsible for bringing it up to the target state.
   881           */
   882          target = min((int)target, CPUHP_BRINGUP_CPU);
   883          ret = cpuhp_up_callbacks(cpu, st, target);
   884  out:
   885          cpus_write_unlock();
   886          return ret;
   887  }
   888  
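For reference, the annotation added at kernel/cpu.c:719-721 and 835-837 re-initializes a single lockdep map with a direction-specific name and key before each hotplug operation, so that dependencies recorded during cpu-up and cpu-down are tracked as separate lock classes. Below is a standalone, hypothetical illustration of that idiom; the names, the helpers, and the acquire/release placement are invented for illustration and are not taken from the patch:

        #include <linux/types.h>
        #include <linux/lockdep.h>

        /* Invented names, for illustration only. */
        static struct lockdep_map demo_state_map;
        static struct lock_class_key demo_up_key, demo_down_key;

        static void demo_begin(bool up)
        {
                /*
                 * Re-key the same map per direction so lockdep treats the
                 * up and down paths as distinct lock classes.
                 */
                if (up)
                        lockdep_init_map(&demo_state_map, "demo-up",
                                         &demo_up_key, 0);
                else
                        lockdep_init_map(&demo_state_map, "demo-down",
                                         &demo_down_key, 0);
                lock_map_acquire(&demo_state_map);
        }

        static void demo_end(void)
        {
                lock_map_release(&demo_state_map);
        }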

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
