tree:   https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git WIP.timers
head:   c0b7a5dbb870d1660aa5e566c5ce9972290a2bed
commit: 6a3164fa4cd35a587b5bb2e4bd86b75900af8286 [8/10] timer: Implement the hierarchical pull model
config: i386-randconfig-r0-201716 (attached as .config)
compiler: gcc-5 (Debian 5.4.1-2) 5.4.1 20160904
reproduce:
        git checkout 6a3164fa4cd35a587b5bb2e4bd86b75900af8286
        # save the attached .config to linux build tree
        make ARCH=i386 

All errors (new ones prefixed by >>):

   In file included from kernel//time/timer_migration.c:16:0:
   kernel//time/timer_migration.h: In function 'tmigr_cpu_idle':
   kernel//time/timer_migration.h:26:1: warning: no return statement in function returning non-void [-Wreturn-type]
    static inline u64 tmigr_cpu_idle(u64 nextevt) { }
    ^
   kernel//time/timer_migration.c: In function '__tmigr_handle_remote':
>> kernel//time/timer_migration.c:190:4: error: implicit declaration of function 'timer_expire_remote' [-Werror=implicit-function-declaration]
       timer_expire_remote(evt->cpu);
       ^
   kernel//time/timer_migration.c: At top level:
>> kernel//time/timer_migration.c:223:6: error: redefinition of 'tmigr_handle_remote'
    void tmigr_handle_remote(void)
         ^
   In file included from kernel//time/timer_migration.c:16:0:
   kernel//time/timer_migration.h:25:20: note: previous definition of 'tmigr_handle_remote' was here
    static inline void tmigr_handle_remote(void) { }
                       ^
>> kernel//time/timer_migration.c:348:5: error: redefinition of 'tmigr_cpu_idle'
    u64 tmigr_cpu_idle(u64 nextevt)
        ^
   In file included from kernel//time/timer_migration.c:16:0:
   kernel//time/timer_migration.h:26:19: note: previous definition of 'tmigr_cpu_idle' was here
    static inline u64 tmigr_cpu_idle(u64 nextevt) { }
                      ^
>> kernel//time/timer_migration.c:406:6: error: redefinition of 'tmigr_cpu_activate'
    void tmigr_cpu_activate(void)
         ^
   In file included from kernel//time/timer_migration.c:16:0:
   kernel//time/timer_migration.h:27:20: note: previous definition of 'tmigr_cpu_activate' was here
    static inline void tmigr_cpu_activate(void) { }
                       ^
   cc1: some warnings being treated as errors
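
The redefinition errors and the -Wreturn-type warning all point at the stubs in
timer_migration.h: the inline dummies are apparently still visible in the build
where timer_migration.c provides the real definitions, and the u64 stub returns
nothing. A minimal sketch of a guarded header layout is shown below; the
CONFIG_SMP guard and the extern declarations are illustrative assumptions, not
taken from the patch:

        /* Sketch only -- the real patch may key this off a different symbol. */
        #ifdef CONFIG_SMP
        extern void tmigr_handle_remote(void);
        extern u64 tmigr_cpu_idle(u64 nextevt);
        extern void tmigr_cpu_activate(void);
        #else
        static inline void tmigr_handle_remote(void) { }
        /* The stub must return a value; passing @nextevt through is one option. */
        static inline u64 tmigr_cpu_idle(u64 nextevt) { return nextevt; }
        static inline void tmigr_cpu_activate(void) { }
        #endif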

vim +/timer_expire_remote +190 kernel//time/timer_migration.c
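
The implicit-declaration error at line 190 means no prototype for
timer_expire_remote() is visible to timer_migration.c. One hedged fix would be
to declare it in a header that timer_migration.c already includes (for example
tick-internal.h); the prototype below is inferred from the call site
timer_expire_remote(evt->cpu) and is an assumption, not taken from the patch:

        /* Declaration sketch only; the parameter type is assumed from evt->cpu. */
        extern void timer_expire_remote(unsigned int cpu);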

    10  #include <linux/slab.h>
    11  #include <linux/smp.h>
    12  #include <linux/spinlock.h>
    13  #include <linux/timerqueue.h>
    14  #include <linux/timer.h>
    15  
  > 16  #include "timer_migration.h"
    17  #include "tick-internal.h"
    18  
    19  #ifdef DEBUG
    20  # define DBG_BUG_ON(x)  BUG_ON(x)
    21  #else
    22  # define DBG_BUG_ON(x)
    23  #endif
    24  
    25  /* Per group capacity. Must be a power of 2! */
    26  static const unsigned int tmigr_childs_per_group = 8;
    27  
    28  bool tmigr_enabled __read_mostly;
    29  static unsigned int tmigr_hierarchy_levels __read_mostly;
    30  static unsigned int tmigr_crossnode_level __read_mostly;
    31  static struct list_head *tmigr_level_list __read_mostly;
    32  
    33  static DEFINE_MUTEX(tmigr_mutex);
    34  
    35  static DEFINE_PER_CPU(struct tmigr_cpu, tmigr_cpu);
    36  
    37  static void tmigr_add_evt(struct tmigr_group *group, struct tmigr_event *evt)
    38  {
    39          /*
    40           * Can be called with @evt == NULL, an already queued @evt or
    41           * an event that does not need to be queued (expires ==
    42           * KTIME_MAX)
    43           */
    44          if (!evt || !RB_EMPTY_NODE(&evt->nextevt.node) ||
    45              evt->nextevt.expires == KTIME_MAX)
    46                  return;
    47  
    48          /* @group->group event must not be queued in the parent group */
    49          DBG_BUG_ON(!RB_EMPTY_NODE(&group->groupevt.nextevt.node));
    50  
    51          /*  If this is the new first to expire event, update group event */
    52          if (timerqueue_add(&group->events, &evt->nextevt)) {
    53                  group->groupevt.nextevt.expires = evt->nextevt.expires;
    54                  group->groupevt.cpu = evt->cpu;
    55          }
    56  }
    57  
    58  static void tmigr_remove_evt(struct tmigr_group *group, struct tmigr_event *evt)
    59  {
    60          struct timerqueue_node *next;
    61          struct tmigr_event *nextevt;
    62          bool first;
    63  
    64          /*
    65           * It's safe to modify the group event of this group, because it is
    66           * not queued in the parent group.
    67           */
    68          DBG_BUG_ON(!RB_EMPTY_NODE(&group->groupevt.nextevt.node));
    69  
    70          /* Remove the child event, if pending */
    71          if (!evt || RB_EMPTY_NODE(&evt->nextevt.node))
    72                  return;
    73          /*
    74           * If this was the last queued event in the group, clear
    75           * the group event. If this was the first event to expire,
    76           * update the group.
    77           */
    78          first = (timerqueue_getnext(&group->events) == &evt->nextevt);
    79  
    80          if (!timerqueue_del(&group->events, &evt->nextevt)) {
    81                  group->groupevt.nextevt.expires = KTIME_MAX;
    82                  group->groupevt.cpu = TMIGR_NONE;
    83          } else if (first) {
    84                  next = timerqueue_getnext(&group->events);
    85                  nextevt = container_of(next, struct tmigr_event, nextevt);
    86                  group->groupevt.nextevt.expires = nextevt->nextevt.expires;
    87                  group->groupevt.cpu = nextevt->cpu;
    88          }
    89  }
    90  
    91  static void tmigr_update_remote(unsigned int cpu, u64 now, unsigned long jif)
    92  {
    93          struct tmigr_cpu *tmc = per_cpu_ptr(&tmigr_cpu, cpu);
    94          struct tmigr_group *group = tmc->tmgroup;
    95          u64 next_local, next_global;
    96  
    97          /*
    98           * Here the migrator CPU races with the target CPU.  The migrator
    99           * removed @tmc->nextevt from the group's queue, but it then dropped
   100           * the group lock.  Concurrently the target CPU might have serviced
   101           * an interrupt and therefore have called tmigr_cpu_activate() and
   102           * possibly tmigr_cpu_idle() which requeued the CPU's @tmc into @group.
   103           *
   104           * Must hold @tmc->lock for changing @tmc->nextevt and @group->lock
   105           * to protect the timer queue of @group.
   106           */
   107          raw_spin_lock_irq(&tmc->lock);
   108          raw_spin_lock(&group->lock);
   109  
   110          /*
   111           * If the cpu went offline or marked itself active again, nothing
   112           * more to do.
   113           */
   114          if (!tmc->online || cpumask_test_cpu(cpu, group->cpus))
   115                  goto done;
   116  
   117          /*
   118           * Although __tmigr_handle_remote() just dequeued the event, still
   119           * the target CPU might have added it again after the lock got
   120           * dropped. If it's queued, the group queue is up to date.
   121           */
   122          if (!RB_EMPTY_NODE(&tmc->cpuevt.nextevt.node))
   123                  goto done;
   124  
   125          /*
   126           * Recalculate next event. Needs to be calculated while holding the
   127           * lock because the first expiring global timer could have been
   128           * removed since the last evaluation.
   129           */
   130          next_local = get_next_timer_interrupt(jif, now, &next_global);
   131  
   132          /*
   133           * If next_global is after next_local, event does not have to
   134           * be queued.
   135           */
   136          if (next_global >= next_local)
   137                  next_global = KTIME_MAX;
   138  
   139          tmc->cpuevt.nextevt.expires = next_global;
   140  
   141          /* Queue @cpu event (it is not queued if expires == KTIME_MAX) */
   142          tmigr_add_evt(group, &tmc->cpuevt);
   143  
   144  done:
   145          raw_spin_unlock(&group->lock);
   146          raw_spin_unlock_irq(&tmc->lock);
   147  }
   148  
   149  static void __tmigr_handle_remote(struct tmigr_group *group, unsigned int cpu,
   150                                    u64 now, unsigned long jif, bool walkup)
   151  {
   152          struct timerqueue_node *tmr;
   153          struct tmigr_group *parent;
   154          struct tmigr_event *evt;
   155  
   156  again:
   157          raw_spin_lock_irq(&group->lock);
   158          /*
   159           * Handle the group only if @cpu is the migrator or if the group
   160           * has no migrator. Otherwise the group is active and is handled by
   161           * its own migrator.
   162           */
   163          if (group->migrator != cpu && group->migrator != TMIGR_NONE) {
   164                  raw_spin_unlock_irq(&group->lock);
   165                  return;
   166          }
   167  
   168          tmr = timerqueue_getnext(&group->events);
   169          if (tmr && now >= tmr->expires) {
   170                  /*
   171                   * Remove the expired entry from the queue and handle
   172                   * it. If this is a leaf group, call the timer poll
   173                   * function for the given cpu. Otherwise handle the group
   174                   * itself.  Drop the group lock here in both cases to avoid
   175                   * lock ordering inversions.
   176                   */
   177                  evt = container_of(tmr, struct tmigr_event, nextevt);
   178                  tmigr_remove_evt(group, evt);
   179  
   180                  raw_spin_unlock_irq(&group->lock);
   181  
   182                  /*
   183                   * If the event is a group event, walk down the hierarchy of
   184                   * that group to the CPU leaves. If not, handle the expired
   185                   * timer from the remote CPU.
   186                   */
   187                  if (evt->group) {
   188                          __tmigr_handle_remote(evt->group, cpu, now, jif, false);
   189                  } else {
 > 190                          timer_expire_remote(evt->cpu);
   191                          tmigr_update_remote(evt->cpu, now, jif);
   192                  }
   193                  goto again;
   194          }
   195  
   196          /*
   197           * If @group is not active, queue the next event in the parent
   198           * group. This is required, because the next event of @group
   199           * could have been changed by tmigr_update_remote() above.
   200           */
   201          parent = group->parent;
   202          if (parent && !group->active) {
   203                  raw_spin_lock_nested(&parent->lock, parent->level);
   204                  tmigr_add_evt(parent, &group->groupevt);
   205                  raw_spin_unlock(&parent->lock);
   206          }
   207          raw_spin_unlock_irq(&group->lock);
   208  
   209          /* Walk the hierarchy up? */
   210          if (!walkup || !parent)
   211                  return;
   212  
   213          /* Racy lockless check: See comment in tmigr_handle_remote() */
   214          if (parent->migrator == cpu)
   215                  __tmigr_handle_remote(parent, cpu, now, jif, true);
   216  }
   217  
   218  /**
   219   * tmigr_handle_remote - Handle migratable timers on remote idle CPUs
   220   *
   221   * Called from the timer soft interrupt with interrupts enabled.
   222   */
 > 223  void tmigr_handle_remote(void)
   224  {
   225          struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
   226          int cpu = smp_processor_id();
   227          unsigned long basej;
   228          ktime_t now;
   229  
   230          if (!tmigr_enabled)
   231                  return;
   232  
   233          /*
   234           * Check whether this CPU is responsible for handling the global
   235           * timers of other CPUs. Do a racy lockless check to avoid lock
   236           * contention for the busy case where timer soft interrupts happen
   237           * in parallel. It's not an issue, if the CPU misses a concurrent
   238           * update of the migrator role for its base group. It's not more
   239           * racy than doing this check under the lock, if the update happens
   240           * right after the lock is dropped. There is no damage in such a
   241           * case other than potentially expiring a global timer one tick
   242           * late.
   243           */
   244          if (tmc->tmgroup->migrator != cpu)
   245                  return;
   246  
   247          now = get_jiffies_update(&basej);
   248          __tmigr_handle_remote(tmc->tmgroup, cpu, now, basej, true);
   249  }
   250  
   251  /**
   252   * tmigr_set_cpu_inactive - Set a CPU inactive in the group
   253   * @group:      The group from which @cpu is removed
   254   * @child:      The child group which was updated before
   255   * @evt:        The event to queue in @group
   256   * @cpu:        The CPU which becomes inactive
   257   *
   258   * Remove @cpu from @group and propagate it through the hierarchy if
   259   * @cpu was the migrator of @group.
   260   *
   261   * Returns KTIME_MAX if @cpu is not the last outgoing CPU in the
   262   * hierarchy. Otherwise it returns the first expiring global event.
   263   */
   264  static u64 tmigr_set_cpu_inactive(struct tmigr_group *group,
   265                                    struct tmigr_group *child,
   266                                    struct tmigr_event *evt,
   267                                    unsigned int cpu)
   268  {
   269          struct tmigr_group *parent;
   270          u64 nextevt = KTIME_MAX;
   271  
   272          raw_spin_lock_nested(&group->lock, group->level);
   273  
   274          DBG_BUG_ON(!group->active);
   275  
   276          cpumask_clear_cpu(cpu, group->cpus);
   277          group->active--;
   278  
   279          /*
   280           * If @child is not NULL, then this is a recursive invocation to
   281           * propagate the deactivation of @cpu. If @child has a new migrator,
   282           * set it active in @group.
   283           */
   284          if (child && child->migrator != TMIGR_NONE) {
   285                  cpumask_set_cpu(child->migrator, group->cpus);
   286                  group->active++;
   287          }
   288  
   289          /* Add @evt to @group */
   290          tmigr_add_evt(group, evt);
   291  
   292          /* If @cpu is not the active migrator, everything is up to date */
   293          if (group->migrator != cpu)
   294                  goto done;
   295  
   296          /* Update the migrator. */
   297          if (!group->active)
   298                  group->migrator = TMIGR_NONE;
   299          else
   300                  group->migrator = cpumask_first(group->cpus);
   301  
   302          parent = group->parent;
   303          if (parent) {
   304                  /*
   305                   * @cpu was the migrator in @group, so it is marked as
   306                   * active in its parent group(s) as well. Propagate the
   307                   * migrator change.
   308                   */
   309                  evt = group->active ? NULL : &group->groupevt;
   310                  nextevt = tmigr_set_cpu_inactive(parent, group, evt, cpu);
   311          } else {
   312                  /*
   313                   * This is the top level of the hierarchy. If @cpu is about
   314                   * to go offline, wake up some random other cpu so it will
   315                   * take over the migrator duty and program its timer
   316                   * properly. Ideally wake the cpu with the closest expiry
   317                   * time, but that's overkill to figure out.
   318                   */
   319                  if (!per_cpu(tmigr_cpu, cpu).online) {
   320                          cpu = cpumask_any_but(cpu_online_mask, cpu);
   321                          smp_send_reschedule(cpu);
   322                  }
   323                  /*
   324                   * Return the earliest event of the top level group to make
   325                   * sure that it's handled.
   326                   *
   327                   * This could be optimized by keeping track of the last
   328                   * global scheduled event and only arming it on @cpu if the
   329                   * new event is earlier. Not sure if it's worth the
   330                   * complexity.
   331                   */
   332                  nextevt = group->groupevt.nextevt.expires;
   333          }
   334  done:
   335          raw_spin_unlock(&group->lock);
   336          return nextevt;
   337  }
   338  
   339  /**
   340   * tmigr_cpu_idle - Put current CPU into idle state
   341   * @nextevt:    The next timer event set in the current CPU
   342   *
   343   * Returns either the next event of the current CPU or the next event from
   344   * the hierarchy if this CPU is the top level migrator.
   345   *
   346   * Must be called with interrupts disabled.
   347   */
 > 348  u64 tmigr_cpu_idle(u64 nextevt)
   349  {
   350          struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
   351          struct tmigr_group *group = tmc->tmgroup;
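
For context on how the two entry points flagged above are meant to be used, a
rough caller sketch follows. It is based only on the kerneldoc comments in the
excerpt; the names example_enter_idle()/example_exit_idle() and the surrounding
logic are illustrative assumptions, not code from the patch:

        /* Hypothetical idle-path caller -- sketch only, not from the patch. */
        static u64 example_enter_idle(u64 next_local_event)
        {
                u64 next_global;

                /* tmigr_cpu_idle() must be called with interrupts disabled. */
                WARN_ON_ONCE(!irqs_disabled());

                /*
                 * Hand the CPU's next event to the hierarchy. The return value
                 * is either this CPU's own next event or, if this CPU is the
                 * top level migrator, the next event of the whole hierarchy.
                 */
                next_global = tmigr_cpu_idle(next_local_event);

                return min(next_local_event, next_global);
        }

        static void example_exit_idle(void)
        {
                /* Mark the CPU active again in its timer migration group. */
                tmigr_cpu_activate();
        }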

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

Attachment: .config.gz
Description: application/gzip
