The current code reads cp->elements[0].cpu and cp->elements[0].dl
without acquiring the cpudl lock. There are two problems with this:

   1. When we read elements[0].dl, the value can be torn on a 32-bit
      machine because elements[0].dl is 64-bit data and the access is
      not atomic there. We should guarantee the read is done
      atomically (see the sketch after this list).

   2. Obsolete data can be read unless readers synchronize with
      updaters:

      updater1      updater2      reader
      --------      --------      ------
      lock A
      set maxcpu = cpu1
      unlock A
                    lock A
                    set maxcpu = cpu2
                    unlock A
                                  read maxcpu, it might be -1

      where maxcpu was -1 initially.
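
As an illustration of problem 1, here is a minimal userspace sketch,
not the actual kernel code, of how a plain 64-bit load can tear on a
32-bit machine (the variable name and values are made up):

      #include <stdint.h>

      volatile uint64_t dl = 0x00000001ffffffffULL;

      void writer(void)
      {
              /* On 32-bit, this may store the low word, then the high word. */
              dl = 0x0000000200000000ULL;
      }

      uint64_t racy_reader(void)
      {
              /*
               * Two 32-bit loads can interleave with the two stores
               * above and return 0x0000000100000000 or
               * 0x00000002ffffffff, values no writer ever stored.
               */
              return dl;
      }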

When reading maxcpu, the reader might still see the initial -1 even
though we expect it to see cpu2. So force readers to be protected by
the lock as well.
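
Both problems disappear once readers take the same lock as updaters. A
simplified userspace analogue of the resulting pattern, assuming a
pthread mutex stands in for cp->lock (this is only a sketch, not the
actual cpudl code):

      #include <pthread.h>
      #include <stdint.h>

      static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
      static int maxcpu = -1;      /* analogue of elements[0].cpu */
      static uint64_t maxdl;       /* analogue of elements[0].dl  */

      static void updater(int cpu, uint64_t dl)
      {
              pthread_mutex_lock(&lock);
              maxcpu = cpu;        /* whole update published at unlock */
              maxdl = dl;
              pthread_mutex_unlock(&lock);
      }

      static void reader(int *cpu, uint64_t *dl)
      {
              pthread_mutex_lock(&lock);
              *cpu = maxcpu;       /* ordered after the latest update */
              *dl = maxdl;         /* never a torn 64-bit value */
              pthread_mutex_unlock(&lock);
      }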

Signed-off-by: Byungchul Park <byungchul.p...@lge.com>
---
 kernel/sched/cpudeadline.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index f03479c..37bbb66 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -118,8 +118,14 @@ static inline u64 cpudl_maximum_dl(struct cpudl *cp)
 static int cpudl_fast_find(struct cpudl *cp, struct task_struct *p)
 {
        const struct sched_dl_entity *dl_se = &p->dl;
-       int max_cpu = cpudl_maximum_cpu(cp);
-       u64 max_dl = cpudl_maximum_dl(cp);
+       unsigned long flags;
+       int max_cpu;
+       u64 max_dl;
+
+       raw_spin_lock_irqsave(&cp->lock, flags);
+       max_cpu = cpudl_maximum_cpu(cp);
+       max_dl = cpudl_maximum_dl(cp);
+       raw_spin_unlock_irqrestore(&cp->lock, flags);
 
        if (cpumask_test_cpu(max_cpu, &p->cpus_allowed) &&
            dl_time_before(dl_se->deadline, max_dl))
-- 
1.9.1
