Reviewers: jarin,

Description:
Fix runtime-atomics for Win 10 SDK and remove volatile

For unclear and probably accidental reasons the Windows 10 SDK
renamed some _Interlocked* functions to _InlineInterlocked. This
leads to these errors:

runtime-atomics.cc(159): error C3861: '_InterlockedExchange64': identifier not
found
runtime-atomics.cc(159): error C3861: '_InterlockedExchangeAdd64': identifier
not found
runtime-atomics.cc(159): error C3861: '_InterlockedAnd64': identifier not found
runtime-atomics.cc(159): error C3861: '_InterlockedOr64': identifier not found
runtime-atomics.cc(159): error C3861: '_InterlockedXor64': identifier not found

Fixing this requires either adding defines to map these five _Interlocked*
functions to _InlineInterlocked*, or else changing to using the
non-underscore versions. It appears that using the non-underscore versions
is preferable so I went that way. This also requires adding three new
defines because there is a huge lack of consistency, probably due to these
macros being defined sometimes in <intrin.h> and sometimes in <winnt.h>.

All five of the renamed 64-bit functions were manually checked to ensure
that the change to the non-underscore versions would make no differences -
the inline functions that they map to were identical. Other functions were
spot-checked.

Also, the 'volatile' qualifiers were removed. Volatile has no useful
meaning for multi-threaded programming. It only exists in the Interlocked*
prototypes to *allow* volatile variables to be passed. Since this is a bad
habit to encourage there is no reason for us to permit it, and we can
still call the Microsoft functions (T* converts to volatile T*, just not
vice-versa).

The updated code builds with the Windows 8.1 SDK and with the Windows 10 SDK.

[email protected]
LOG=Y
BUG=440500,491424

Please review this at https://codereview.chromium.org/1228063005/

Base URL: https://chromium.googlesource.com/v8/v8.git@master

Affected files (+52, -47 lines):
  M src/runtime/runtime-atomics.cc


Index: src/runtime/runtime-atomics.cc
diff --git a/src/runtime/runtime-atomics.cc b/src/runtime/runtime-atomics.cc
index c9b78769cdc5a8630b3e254de8719c2d7e2583c5..e8c4d9e9c2ecd5344af7997fadee96ddf77b09db 100644
--- a/src/runtime/runtime-atomics.cc
+++ b/src/runtime/runtime-atomics.cc
@@ -103,12 +103,15 @@ inline void StoreSeqCst(uint64_t* p, uint64_t value) {

 #elif V8_CC_MSVC

-#define _InterlockedCompareExchange32 _InterlockedCompareExchange
-#define _InterlockedExchange32 _InterlockedExchange
-#define _InterlockedExchangeAdd32 _InterlockedExchangeAdd
-#define _InterlockedAnd32 _InterlockedAnd
-#define _InterlockedOr32 _InterlockedOr
-#define _InterlockedXor32 _InterlockedXor
+#define InterlockedCompareExchange32 _InterlockedCompareExchange
+#define InterlockedExchange32 _InterlockedExchange
+#define InterlockedExchangeAdd32 _InterlockedExchangeAdd
+#define InterlockedAnd32 _InterlockedAnd
+#define InterlockedOr32 _InterlockedOr
+#define InterlockedXor32 _InterlockedXor
+#define InterlockedExchangeAdd16 _InterlockedExchangeAdd16
+#define InterlockedCompareExchange8 _InterlockedCompareExchange8
+#define InterlockedExchangeAdd8 _InterlockedExchangeAdd8

 #define INTEGER_TYPES(V)                           \
   V(int8_t, 8, char)                               \
@@ -120,52 +123,54 @@ inline void StoreSeqCst(uint64_t* p, uint64_t value) {
   V(int64_t, 64, LONGLONG)                         \
   V(uint64_t, 64, LONGLONG)

-#define ATOMIC_OPS(type, suffix, vctype) \
-  inline type CompareExchangeSeqCst(volatile type* p, type oldval, \
-                                    type newval) { \
-    return _InterlockedCompareExchange##suffix( \
-        reinterpret_cast<volatile vctype*>(p), bit_cast<vctype>(newval), \
-        bit_cast<vctype>(oldval)); \
-  } \
-  inline type LoadSeqCst(volatile type* p) { return *p; } \
-  inline void StoreSeqCst(volatile type* p, type value) { \
-    _InterlockedExchange##suffix(reinterpret_cast<volatile vctype*>(p), \
-                                 bit_cast<vctype>(value)); \
-  } \
-  inline type AddSeqCst(volatile type* p, type value) { \
-    return _InterlockedExchangeAdd##suffix( \
-        reinterpret_cast<volatile vctype*>(p), bit_cast<vctype>(value)); \
-  } \
-  inline type SubSeqCst(volatile type* p, type value) { \
-    return _InterlockedExchangeAdd##suffix( \
-        reinterpret_cast<volatile vctype*>(p), -bit_cast<vctype>(value)); \
-  } \
-  inline type AndSeqCst(volatile type* p, type value) { \
-    return _InterlockedAnd##suffix(reinterpret_cast<volatile vctype*>(p), \
-                                   bit_cast<vctype>(value)); \
-  } \
-  inline type OrSeqCst(volatile type* p, type value) { \
-    return _InterlockedOr##suffix(reinterpret_cast<volatile vctype*>(p), \
-                                  bit_cast<vctype>(value)); \
-  } \
-  inline type XorSeqCst(volatile type* p, type value) { \
-    return _InterlockedXor##suffix(reinterpret_cast<volatile vctype*>(p), \
-                                   bit_cast<vctype>(value)); \
-  } \
-  inline type ExchangeSeqCst(volatile type* p, type value) { \
-    return _InterlockedExchange##suffix(reinterpret_cast<volatile vctype*>(p), \
-                                        bit_cast<vctype>(value)); \
+#define ATOMIC_OPS(type, suffix, vctype) \
+  inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \
+    return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \
+                                              bit_cast<vctype>(newval), \
+                                              bit_cast<vctype>(oldval)); \
+  } \
+  inline type LoadSeqCst(type* p) { return *p; } \
+  inline void StoreSeqCst(type* p, type value) { \
+    InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \
+                                bit_cast<vctype>(value)); \
+  } \
+  inline type AddSeqCst(type* p, type value) { \
+    return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \
+                                          bit_cast<vctype>(value)); \
+  } \
+  inline type SubSeqCst(type* p, type value) { \
+    return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \
+                                          -bit_cast<vctype>(value)); \
+  } \
+  inline type AndSeqCst(type* p, type value) { \
+    return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \
+                                  bit_cast<vctype>(value)); \
+  } \
+  inline type OrSeqCst(type* p, type value) { \
+    return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \
+                                 bit_cast<vctype>(value)); \
+  } \
+  inline type XorSeqCst(type* p, type value) { \
+    return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \
+                                  bit_cast<vctype>(value)); \
+  } \
+  inline type ExchangeSeqCst(type* p, type value) { \
+    return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \
+                                       bit_cast<vctype>(value)); \
   }
 INTEGER_TYPES(ATOMIC_OPS)
 #undef ATOMIC_OPS

 #undef INTEGER_TYPES
-#undef _InterlockedCompareExchange32
-#undef _InterlockedExchange32
-#undef _InterlockedExchangeAdd32
-#undef _InterlockedAnd32
-#undef _InterlockedOr32
-#undef _InterlockedXor32
+#undef InterlockedCompareExchange32
+#undef InterlockedExchange32
+#undef InterlockedExchangeAdd32
+#undef InterlockedAnd32
+#undef InterlockedOr32
+#undef InterlockedXor32
+#undef InterlockedExchangeAdd16
+#undef InterlockedCompareExchange8
+#undef InterlockedExchangeAdd8

 #else



--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
--- You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
For more options, visit https://groups.google.com/d/optout.

Reply via email to