Revision: 20446
Author: [email protected]
Date: Wed Apr 2 16:37:33 2014 UTC
Log: ARM64: Fix and improve atomic operations.
* The 'compare and swap' operations should enforce memory ordering even when
  the exchange does not occur.
* The exclusive monitor does not need to be cleared by CLREX if a LDXR was
  not followed by a matching STXR.
* Use LDAR and STLR where possible.
* Use the 'I' and 'J' constraints to hint that constants are valid as
  immediate values.
[email protected], [email protected], [email protected]
Review URL: https://codereview.chromium.org/220793002
http://code.google.com/p/v8/source/detail?r=20446
Modified:
/branches/bleeding_edge/src/atomicops_internals_arm64_gcc.h
=======================================
--- /branches/bleeding_edge/src/atomicops_internals_arm64_gcc.h  Wed Mar 26 18:04:30 2014 UTC
+++ /branches/bleeding_edge/src/atomicops_internals_arm64_gcc.h  Wed Apr  2 16:37:33 2014 UTC
@@ -34,12 +34,16 @@
namespace internal {
inline void MemoryBarrier() {
-  __asm__ __volatile__ (  // NOLINT
-    "dmb ish  \n\t"  // Data memory barrier.
-    ::: "memory"
-  );  // NOLINT
+  __asm__ __volatile__ ("dmb ish" ::: "memory");  // NOLINT
}
+// NoBarrier versions of the operation include "memory" in the clobber list.
+// This is not required for direct usage of the NoBarrier versions of the
+// operations. However, it is required for correctness when they are used as
+// part of the Acquire or Release versions, to ensure that nothing from
+// outside the call is reordered between the operation and the memory
+// barrier. This does not change the code generated, so it has no or minimal
+// impact on the NoBarrier operations.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
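
To see why the "memory" clobber matters, here is a minimal sketch
(illustrative only, not part of the patch) of a pure compiler barrier:

  // The "memory" clobber tells GCC that the asm may read or write any
  // memory, so values cached in registers are written back before the
  // statement and reloaded after it. With an empty template no
  // instruction is emitted; only compiler reordering is inhibited.
  inline void CompilerBarrier() {
    __asm__ __volatile__ ("" ::: "memory");
  }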
@@ -55,13 +59,12 @@
"stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new
value.
"cbnz %w[temp], 0b \n\t" // Retry if it did not
work.
"1: \n\t"
- "clrex \n\t" // In case we didn't
swap.
: [prev]"=&r" (prev),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
- : [old_value]"r" (old_value),
+ : [old_value]"IJr" (old_value),
[new_value]"r" (new_value)
- : "memory", "cc"
+ : "cc", "memory"
); // NOLINT
return prev;
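
On AArch64, the 'I' constraint accepts constants encodable in the immediate
field of ADD, and 'J' those encodable for SUB; combined with 'r', the
compiler may either substitute the literal directly or fall back to a
register. A hedged sketch of the idea (AddSmallConstant is a hypothetical
name, not from the patch):

  // With "Ir", a constant such as 16 that fits the 12-bit ADD immediate
  // is emitted directly ("add x0, x1, 16"); any other operand is loaded
  // into a register first.
  inline long AddSmallConstant(long base, long offset) {
    long result;
    __asm__ ("add %[result], %[base], %[offset]"
             : [result]"=r" (result)
             : [base]"r" (base), [offset]"Ir" (offset));
    return result;
  }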
@@ -101,7 +104,7 @@
: [result]"=&r" (result),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
- : [increment]"r" (increment)
+ : [increment]"IJr" (increment)
: "memory"
); // NOLINT
@@ -110,8 +113,10 @@
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
+ Atomic32 result;
+
MemoryBarrier();
- Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
+ result = NoBarrier_AtomicIncrement(ptr, increment);
MemoryBarrier();
return result;
@@ -121,27 +126,9 @@
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
- int32_t temp;
- __asm__ __volatile__ ( // NOLINT
- "0: \n\t"
- "ldxr %w[prev], %[ptr] \n\t" // Load the previous
value.
- "cmp %w[prev], %w[old_value] \n\t"
- "bne 1f \n\t"
- "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new
value.
- "cbnz %w[temp], 0b \n\t" // Retry if it did not
work.
- "dmb ish \n\t" // Data memory barrier.
- "1: \n\t"
-  // If the compare failed the 'dmb' is unnecessary, but we still need a
-  // 'clrex'.
- "clrex \n\t"
- : [prev]"=&r" (prev),
- [temp]"=&r" (temp),
- [ptr]"+Q" (*ptr)
- : [old_value]"r" (old_value),
- [new_value]"r" (new_value)
- : "memory", "cc"
- ); // NOLINT
+ prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ MemoryBarrier();
return prev;
}
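
The first bullet of the log is visible here: ordering is now enforced
whether or not the exchange happens, since callers generally cannot tell a
failed from a successful exchange before relying on that ordering. A
hypothetical usage sketch built on this API (lock_word, Lock and Unlock are
illustrative names, not from the patch):

  static volatile Atomic32 lock_word = 0;  // 0 = free, 1 = held.

  void Lock() {
    // Spin until the word is observed free and the exchange succeeds.
    // Acquire semantics keep critical-section accesses from being
    // reordered before the CAS, even across a failed attempt.
    while (Acquire_CompareAndSwap(&lock_word, 0, 1) != 0) {}
  }

  void Unlock() {
    Release_Store(&lock_word, 0);
  }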
@@ -150,27 +137,9 @@
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
- int32_t temp;
MemoryBarrier();
-
- __asm__ __volatile__ ( // NOLINT
- "0: \n\t"
- "ldxr %w[prev], %[ptr] \n\t" // Load the previous
value.
- "cmp %w[prev], %w[old_value] \n\t"
- "bne 1f \n\t"
- "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new
value.
- "cbnz %w[temp], 0b \n\t" // Retry if it did not
work.
- "1: \n\t"
-  // If the compare failed we still need a 'clrex'.
- "clrex \n\t"
- : [prev]"=&r" (prev),
- [temp]"=&r" (temp),
- [ptr]"+Q" (*ptr)
- : [old_value]"r" (old_value),
- [new_value]"r" (new_value)
- : "memory", "cc"
- ); // NOLINT
+ prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
return prev;
}
@@ -185,8 +154,12 @@
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- MemoryBarrier();
- *ptr = value;
+ __asm__ __volatile__ ( // NOLINT
+ "stlr %w[value], %[ptr] \n\t"
+ : [ptr]"=Q" (*ptr)
+ : [value]"r" (value)
+ : "memory"
+ ); // NOLINT
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
@@ -194,8 +167,15 @@
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr;
- MemoryBarrier();
+ Atomic32 value;
+
+ __asm__ __volatile__ ( // NOLINT
+ "ldar %w[value], %[ptr] \n\t"
+ : [value]"=r" (value)
+ : [ptr]"Q" (*ptr)
+ : "memory"
+ ); // NOLINT
+
return value;
}
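
With LDAR/STLR the ordering travels with the accesses themselves rather
than a separate 'dmb' on each side, which is why the barrier calls
disappear above. A hypothetical message-passing sketch using the two new
primitives (payload and ready are illustrative names):

  static int payload = 0;
  static volatile Atomic32 ready = 0;

  void Producer() {
    payload = 42;              // Ordinary store...
    Release_Store(&ready, 1);  // ...ordered before the flag by STLR.
  }

  void Consumer() {
    while (Acquire_Load(&ready) == 0) {}  // LDAR orders the flag load...
    int v = payload;                      // ...before this ordinary load.
    (void)v;
  }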
@@ -221,13 +201,12 @@
"stxr %w[temp], %[new_value], %[ptr] \n\t"
"cbnz %w[temp], 0b \n\t"
"1: \n\t"
- "clrex \n\t"
: [prev]"=&r" (prev),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
- : [old_value]"r" (old_value),
+ : [old_value]"IJr" (old_value),
[new_value]"r" (new_value)
- : "memory", "cc"
+ : "cc", "memory"
); // NOLINT
return prev;
@@ -267,7 +246,7 @@
: [result]"=&r" (result),
[temp]"=&r" (temp),
[ptr]"+Q" (*ptr)
- : [increment]"r" (increment)
+ : [increment]"IJr" (increment)
: "memory"
); // NOLINT
@@ -276,8 +255,10 @@
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
+ Atomic64 result;
+
MemoryBarrier();
- Atomic64 result = NoBarrier_AtomicIncrement(ptr, increment);
+ result = NoBarrier_AtomicIncrement(ptr, increment);
MemoryBarrier();
return result;
@@ -287,25 +268,9 @@
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev;
- int32_t temp;
- __asm__ __volatile__ ( // NOLINT
- "0: \n\t"
- "ldxr %[prev], %[ptr] \n\t"
- "cmp %[prev], %[old_value] \n\t"
- "bne 1f \n\t"
- "stxr %w[temp], %[new_value], %[ptr] \n\t"
- "cbnz %w[temp], 0b \n\t"
- "dmb ish \n\t"
- "1: \n\t"
- "clrex \n\t"
- : [prev]"=&r" (prev),
- [temp]"=&r" (temp),
- [ptr]"+Q" (*ptr)
- : [old_value]"r" (old_value),
- [new_value]"r" (new_value)
- : "memory", "cc"
- ); // NOLINT
+ prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ MemoryBarrier();
return prev;
}
@@ -314,26 +279,9 @@
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev;
- int32_t temp;
MemoryBarrier();
-
- __asm__ __volatile__ ( // NOLINT
- "0: \n\t"
- "ldxr %[prev], %[ptr] \n\t"
- "cmp %[prev], %[old_value] \n\t"
- "bne 1f \n\t"
- "stxr %w[temp], %[new_value], %[ptr] \n\t"
- "cbnz %w[temp], 0b \n\t"
- "1: \n\t"
- "clrex \n\t"
- : [prev]"=&r" (prev),
- [temp]"=&r" (temp),
- [ptr]"+Q" (*ptr)
- : [old_value]"r" (old_value),
- [new_value]"r" (new_value)
- : "memory", "cc"
- ); // NOLINT
+ prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
return prev;
}
@@ -348,8 +296,12 @@
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- MemoryBarrier();
- *ptr = value;
+ __asm__ __volatile__ ( // NOLINT
+ "stlr %x[value], %[ptr] \n\t"
+ : [ptr]"=Q" (*ptr)
+ : [value]"r" (value)
+ : "memory"
+ ); // NOLINT
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
@@ -357,8 +309,15 @@
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- Atomic64 value = *ptr;
- MemoryBarrier();
+  Atomic64 value;
+
+ __asm__ __volatile__ ( // NOLINT
+ "ldar %x[value], %[ptr] \n\t"
+ : [value]"=r" (value)
+ : [ptr]"Q" (*ptr)
+ : "memory"
+ ); // NOLINT
+
return value;
}
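
For comparison, these sequences match what C++11 compilers typically emit
for std::atomic on ARM64; a hedged aside, not part of the patch:

  #include <atomic>

  std::atomic<int> flag;

  int AcquireLoad() {
    return flag.load(std::memory_order_acquire);  // typically ldar
  }

  void ReleaseStore(int v) {
    flag.store(v, std::memory_order_release);     // typically stlr
  }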