Extended testing of the quad core configuration revealed that this fix was
insufficient. Specifically, LTP open posix shm_op/23-1 would still trigger the
hardware livelock in the llock/scond loop in update_cpu_load_active().

So remove this and make way for a proper workaround.

This reverts commit a5c8b52abe677977883655166796f167ef1e0084.

Signed-off-by: Vineet Gupta <[email protected]>
---
 arch/arc/include/asm/atomic.h | 14 ++------------
 1 file changed, 2 insertions(+), 12 deletions(-)
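
For context, this revert returns atomic_##op to the plain llock/scond retry
sequence. A minimal sketch of what ATOMIC_OP(add, +=, add) expands to after
the revert (operand constraints are paraphrased from the file, not quoted in
the hunks below):

	static inline void atomic_add(int i, atomic_t *v)
	{
		unsigned int temp;

		__asm__ __volatile__(
		"1:	llock   %0, [%1]	\n"	/* load-locked the counter     */
		"	add     %0, %0, %2	\n"
		"	scond   %0, [%1]	\n"	/* store-conditional           */
		"	bnz     1b		\n"	/* retry if another core raced */
		: "=&r"(temp)
		: "r"(&v->counter), "ir"(i)
		: "cc");
	}

Under heavy cross-core contention the scond can keep failing, which is the
livelock the PREFETCHW hack tried to paper over and which a proper workaround
will address.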

diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 03484cb4d16d..20b7dc17979e 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -23,21 +23,13 @@
 
 #define atomic_set(v, i) (((v)->counter) = (i))
 
-#ifdef CONFIG_ISA_ARCV2
-#define PREFETCHW      "       prefetchw   [%1]        \n"
-#else
-#define PREFETCHW
-#endif
-
 #define ATOMIC_OP(op, c_op, asm_op)                                    \
 static inline void atomic_##op(int i, atomic_t *v)                     \
 {                                                                      \
        unsigned int temp;                                              \
                                                                        \
        __asm__ __volatile__(                                           \
-       "1:                             \n"                             \
-       PREFETCHW                                                       \
-       "       llock   %0, [%1]        \n"                             \
+       "1:     llock   %0, [%1]        \n"                             \
        "       " #asm_op " %0, %0, %2  \n"                             \
        "       scond   %0, [%1]        \n"                             \
        "       bnz     1b              \n"                             \
@@ -58,9 +50,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
        smp_mb();                                                       \
                                                                        \
        __asm__ __volatile__(                                           \
-       "1:                             \n"                             \
-       PREFETCHW                                                       \
-       "       llock   %0, [%1]        \n"                             \
+       "1:     llock   %0, [%1]        \n"                             \
        "       " #asm_op " %0, %0, %2  \n"                             \
        "       scond   %0, [%1]        \n"                             \
        "       bnz     1b              \n"                             \
-- 
1.9.1
