Revision: 9845
Author: [email protected]
Date: Mon Oct 31 05:47:02 2011
Log: MIPS: updated atomic operations.
Specifically:
-fixed a bug in CompareAndSwap that caused randomly occurring timeouts on
MIPS boards.
-added gcc inline asm control push/pop instructions to correctly
save/restore the reorder setting instead of simply assuming it should be
enabled.
-reordered/optimized some instructions to utilize MIPS-specific pipelining
features (branch delay slot).
-fixed improper usage of write barriers
BUG=
TEST=
Review URL: http://codereview.chromium.org/8413073
Patch from Gergely Kis <[email protected]>.
http://code.google.com/p/v8/source/detail?r=9845
Modified:
/branches/bleeding_edge/src/atomicops_internals_mips_gcc.h
=======================================
--- /branches/bleeding_edge/src/atomicops_internals_mips_gcc.h Mon Mar 28 06:05:36 2011
+++ /branches/bleeding_edge/src/atomicops_internals_mips_gcc.h Mon Oct 31 05:47:02 2011
@@ -30,7 +30,7 @@
#ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("sync" : : : "memory")
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
namespace v8 {
namespace internal {
@@ -48,16 +48,19 @@
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
- Atomic32 prev;
- __asm__ __volatile__("1:\n"
- "ll %0, %1\n" // prev = *ptr
+ Atomic32 prev, tmp;
+ __asm__ __volatile__(".set push\n"
+ ".set noreorder\n"
+ "1:\n"
+ "ll %0, %5\n" // prev = *ptr
"bne %0, %3, 2f\n" // if (prev != old_value) goto 2
- "nop\n" // delay slot nop
- "sc %2, %1\n" // *ptr = new_value (with atomic check)
+ "move %2, %4\n" // tmp = new_value
+ "sc %2, %1\n" // *ptr = tmp (with atomic check)
"beqz %2, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
"2:\n"
- : "=&r" (prev), "=m" (*ptr), "+&r" (new_value)
+ ".set pop\n"
+ : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
: "Ir" (old_value), "r" (new_value), "m" (*ptr)
: "memory");
return prev;
@@ -68,12 +71,15 @@
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 temp, old;
- __asm__ __volatile__("1:\n"
+ __asm__ __volatile__(".set push\n"
+ ".set noreorder\n"
+ "1:\n"
"ll %1, %2\n" // old = *ptr
"move %0, %3\n" // temp = new_value
"sc %0, %2\n" // *ptr = temp (with atomic check)
"beqz %0, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
+ ".set pop\n"
: "=&r" (temp), "=&r" (old), "=m" (*ptr)
: "r" (new_value), "m" (*ptr)
: "memory");
@@ -87,13 +93,15 @@
Atomic32 increment) {
Atomic32 temp, temp2;
- __asm__ __volatile__("1:\n"
+ __asm__ __volatile__(".set push\n"
+ ".set noreorder\n"
+ "1:\n"
"ll %0, %2\n" // temp = *ptr
- "addu %0, %3\n" // temp = temp + increment
- "move %1, %0\n" // temp2 = temp
- "sc %0, %2\n" // *ptr = temp (with atomic check)
- "beqz %0, 1b\n" // start again on atomic error
- "nop\n" // delay slot nop
+ "addu %1, %0, %3\n" // temp2 = temp + increment
+ "sc %1, %2\n" // *ptr = temp2 (with atomic check)
+ "beqz %1, 1b\n" // start again on atomic error
+ "addu %1, %0, %3\n" // temp2 = temp + increment
+ ".set pop\n"
: "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
: "Ir" (increment), "m" (*ptr)
: "memory");
@@ -103,6 +111,7 @@
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
+ ATOMICOPS_COMPILER_BARRIER();
Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
ATOMICOPS_COMPILER_BARRIER();
return res;
@@ -117,16 +126,19 @@
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
- Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
ATOMICOPS_COMPILER_BARRIER();
- return x;
+ Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ ATOMICOPS_COMPILER_BARRIER();
+ return res;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
ATOMICOPS_COMPILER_BARRIER();
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ ATOMICOPS_COMPILER_BARRIER();
+ return res;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
@@ -134,7 +146,7 @@
}
inline void MemoryBarrier() {
- ATOMICOPS_COMPILER_BARRIER();
+ __asm__ __volatile__("sync" : : : "memory");
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev