This is a note to let you know that I've just added the patch titled
ARM: sched_clock: Load cycle count after epoch stabilizes
to the 3.10-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary
The filename of the patch is:
arm-sched_clock-load-cycle-count-after-epoch-stabilizes.patch
and it can be found in the queue-3.10 subdirectory.
If you, or anyone else, feel it should not be added to the stable tree,
please let <[email protected]> know about it.
From 336ae1180df5f69b9e0fb6561bec01c5f64361cf Mon Sep 17 00:00:00 2001
From: Stephen Boyd <[email protected]>
Date: Mon, 17 Jun 2013 15:40:58 -0700
Subject: ARM: sched_clock: Load cycle count after epoch stabilizes
From: Stephen Boyd <[email protected]>
commit 336ae1180df5f69b9e0fb6561bec01c5f64361cf upstream.
There is a small race between when the cycle count is read from
the hardware and when the epoch stabilizes. Consider this
scenario:
 CPU0                           CPU1
 ----                           ----
 cyc = read_sched_clock()
 cyc_to_sched_clock()
                                 update_sched_clock()
                                  ...
                                  cd.epoch_cyc = cyc;
  epoch_cyc = cd.epoch_cyc;
  ...
  epoch_ns + cyc_to_ns((cyc - epoch_cyc)
The cyc on CPU0 was read before the epoch changed, but we
calculate the nanoseconds based on the new epoch by subtracting
the new epoch from the old cycle count. Since the new epoch is
most likely larger than the old cycle count, the masked
subtraction wraps around to a large value that is then converted
to nanoseconds and added to epoch_ns, causing time to jump
forward too much.
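To see the failure mode concretely, here is a small stand-alone
sketch (illustrative only, not part of the patch; the counter
values are made up) of how the unsigned subtraction wraps when
the epoch has just overtaken the sampled cycle count:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Illustrative values: the stale sample and the new epoch. */
		uint32_t cyc = 1000;        /* read before update_sched_clock() ran */
		uint32_t epoch_cyc = 2000;  /* epoch written by the concurrent update */
		uint32_t mask = 0xffffffff; /* full 32-bit counter for simplicity */

		/* Unsigned subtraction wraps around instead of going negative. */
		uint32_t delta = (cyc - epoch_cyc) & mask;

		printf("delta = %u cycles\n", delta); /* ~4.29 billion, not -1000 */
		return 0;
	}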
Fix this problem by reading the hardware after the epoch has
stabilized.
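After the change, sched_clock_32() samples the hardware counter
only once the epoch pair has been read consistently. A simplified
sketch of the resulting ordering (paraphrasing the patched 3.10
code shown below, with the cd.suspended early return omitted) is:

	static unsigned long long notrace sched_clock_32(void)
	{
		u64 epoch_ns;
		u32 epoch_cyc;
		u32 cyc;

		/* Spin until epoch_cyc and epoch_ns form a consistent pair. */
		do {
			epoch_cyc = cd.epoch_cyc;
			smp_rmb();
			epoch_ns = cd.epoch_ns;
			smp_rmb();
		} while (epoch_cyc != cd.epoch_cyc_copy);

		/* Only now read the counter, so it cannot predate the epoch. */
		cyc = read_sched_clock();
		cyc = (cyc - epoch_cyc) & sched_clock_mask;
		return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
	}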
Cc: Russell King <[email protected]>
Signed-off-by: Stephen Boyd <[email protected]>
Signed-off-by: John Stultz <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
---
arch/arm/kernel/sched_clock.c | 13 +++++--------
1 file changed, 5 insertions(+), 8 deletions(-)
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -51,10 +51,11 @@ static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 	return (cyc * mult) >> shift;
 }
 
-static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
+static unsigned long long notrace sched_clock_32(void)
 {
 	u64 epoch_ns;
 	u32 epoch_cyc;
+	u32 cyc;
 
 	if (cd.suspended)
 		return cd.epoch_ns;
@@ -73,7 +74,9 @@ static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
 		smp_rmb();
 	} while (epoch_cyc != cd.epoch_cyc_copy);
 
-	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
+	cyc = read_sched_clock();
+	cyc = (cyc - epoch_cyc) & sched_clock_mask;
+	return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
 }
 
 /*
@@ -165,12 +168,6 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
 	pr_debug("Registered %pF as sched_clock source\n", read);
 }
 
-static unsigned long long notrace sched_clock_32(void)
-{
-	u32 cyc = read_sched_clock();
-	return cyc_to_sched_clock(cyc, sched_clock_mask);
-}
-
 unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32;
 
 unsigned long long notrace sched_clock(void)
Patches currently in stable-queue which might be from [email protected] are
queue-3.10/arm-sched_clock-load-cycle-count-after-epoch-stabilizes.patch