Re: r296063 - Revert r291477 "[Frontend] Correct values of ATOMIC_*_LOCK_FREE to match builtin"
Merged to 4.0 in r296152. On Thu, Feb 23, 2017 at 5:16 PM, Hans Wennborg via cfe-commits <cfe-commits@lists.llvm.org> wrote: > Author: hans > Date: Thu Feb 23 19:16:34 2017 > New Revision: 296063 > > URL: http://llvm.org/viewvc/llvm-project?rev=296063&view=rev > Log: > Revert r291477 "[Frontend] Correct values of ATOMIC_*_LOCK_FREE to match > builtin" > > It caused PR31864. There is a patch in progress to fix that, but let's > revert in the meantime. > > Modified: > cfe/trunk/lib/Frontend/InitPreprocessor.cpp > cfe/trunk/test/Sema/atomic-ops.c > > Modified: cfe/trunk/lib/Frontend/InitPreprocessor.cpp > URL: > http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Frontend/InitPreprocessor.cpp?rev=296063&r1=296062&r2=296063&view=diff > == > --- cfe/trunk/lib/Frontend/InitPreprocessor.cpp (original) > +++ cfe/trunk/lib/Frontend/InitPreprocessor.cpp Thu Feb 23 19:16:34 2017 > @@ -286,12 +286,12 @@ static void DefineFastIntType(unsigned T > > /// Get the value the ATOMIC_*_LOCK_FREE macro should have for a type with > /// the specified properties. > -static const char *getLockFreeValue(unsigned TypeWidth, unsigned > InlineWidth) { > +static const char *getLockFreeValue(unsigned TypeWidth, unsigned TypeAlign, > +unsigned InlineWidth) { >// Fully-aligned, power-of-2 sizes no larger than the inline >// width will be inlined as lock-free operations. > - // Note: we do not need to check alignment since _Atomic(T) is always > - // appropriately-aligned in clang. > - if ((TypeWidth & (TypeWidth - 1)) == 0 && TypeWidth <= InlineWidth) > + if (TypeWidth == TypeAlign && (TypeWidth & (TypeWidth - 1)) == 0 && > + TypeWidth <= InlineWidth) > return "2"; // "always lock free" >// We cannot be certain what operations the lib calls might be >// able to implement as lock-free on future processors. 
> @@ -886,6 +886,7 @@ static void InitializePredefinedMacros(c > #define DEFINE_LOCK_FREE_MACRO(TYPE, Type) \ > Builder.defineMacro("__GCC_ATOMIC_" #TYPE "_LOCK_FREE", \ > getLockFreeValue(TI.get##Type##Width(), \ > + TI.get##Type##Align(), \ > InlineWidthBits)); > DEFINE_LOCK_FREE_MACRO(BOOL, Bool); > DEFINE_LOCK_FREE_MACRO(CHAR, Char); > @@ -898,6 +899,7 @@ static void InitializePredefinedMacros(c > DEFINE_LOCK_FREE_MACRO(LLONG, LongLong); > Builder.defineMacro("__GCC_ATOMIC_POINTER_LOCK_FREE", > getLockFreeValue(TI.getPointerWidth(0), > + TI.getPointerAlign(0), > InlineWidthBits)); > #undef DEFINE_LOCK_FREE_MACRO >} > > Modified: cfe/trunk/test/Sema/atomic-ops.c > URL: > http://llvm.org/viewvc/llvm-project/cfe/trunk/test/Sema/atomic-ops.c?rev=296063&r1=296062&r2=296063&view=diff > == > --- cfe/trunk/test/Sema/atomic-ops.c (original) > +++ cfe/trunk/test/Sema/atomic-ops.c Thu Feb 23 19:16:34 2017 > @@ -14,7 +14,11 @@ _Static_assert(__GCC_ATOMIC_WCHAR_T_LOCK > _Static_assert(__GCC_ATOMIC_SHORT_LOCK_FREE == 2, ""); > _Static_assert(__GCC_ATOMIC_INT_LOCK_FREE == 2, ""); > _Static_assert(__GCC_ATOMIC_LONG_LOCK_FREE == 2, ""); > +#ifdef __i386__ > +_Static_assert(__GCC_ATOMIC_LLONG_LOCK_FREE == 1, ""); > +#else > _Static_assert(__GCC_ATOMIC_LLONG_LOCK_FREE == 2, ""); > +#endif > _Static_assert(__GCC_ATOMIC_POINTER_LOCK_FREE == 2, ""); > > _Static_assert(__c11_atomic_is_lock_free(1), ""); > > > ___ > cfe-commits mailing list > cfe-commits@lists.llvm.org > http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits ___ cfe-commits mailing list cfe-commits@lists.llvm.org http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits
Re: r291477 - [Frontend] Correct values of ATOMIC_*_LOCK_FREE to match builtin
This was reverted in r296063 due to PR31864. On Mon, Jan 9, 2017 at 12:54 PM, Michal Gorny via cfe-commits wrote: > Author: mgorny > Date: Mon Jan 9 14:54:20 2017 > New Revision: 291477 > > URL: http://llvm.org/viewvc/llvm-project?rev=291477&view=rev > Log: > [Frontend] Correct values of ATOMIC_*_LOCK_FREE to match builtin > > Correct the logic used to set ATOMIC_*_LOCK_FREE preprocessor macros not > to rely on the ABI alignment of types. Instead, just assume all those > types are aligned correctly by default since clang uses safe alignment > for _Atomic types even if the underlying types are aligned to a lower > boundary by default. > > For example, the 'long long' and 'double' types on x86 are aligned to > 32-bit boundary by default. However, '_Atomic long long' and '_Atomic > double' are aligned to 64-bit boundary, therefore satisfying > the requirements of lock-free atomic operations. > > This fixes PR #19355 by correcting the value of > __GCC_ATOMIC_LLONG_LOCK_FREE on x86, and therefore also fixing > the assumption made in libc++ tests. This also fixes PR #30581 by > applying a consistent logic between the functions used to implement > both interfaces. > > Differential Revision: https://reviews.llvm.org/D28213 > > Modified: > cfe/trunk/lib/Frontend/InitPreprocessor.cpp > cfe/trunk/test/Sema/atomic-ops.c > > Modified: cfe/trunk/lib/Frontend/InitPreprocessor.cpp > URL: > http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Frontend/InitPreprocessor.cpp?rev=291477&r1=291476&r2=291477&view=diff > == > --- cfe/trunk/lib/Frontend/InitPreprocessor.cpp (original) > +++ cfe/trunk/lib/Frontend/InitPreprocessor.cpp Mon Jan 9 14:54:20 2017 > @@ -286,12 +286,12 @@ static void DefineFastIntType(unsigned T > > /// Get the value the ATOMIC_*_LOCK_FREE macro should have for a type with > /// the specified properties. 
> -static const char *getLockFreeValue(unsigned TypeWidth, unsigned TypeAlign, > -unsigned InlineWidth) { > +static const char *getLockFreeValue(unsigned TypeWidth, unsigned > InlineWidth) { >// Fully-aligned, power-of-2 sizes no larger than the inline >// width will be inlined as lock-free operations. > - if (TypeWidth == TypeAlign && (TypeWidth & (TypeWidth - 1)) == 0 && > - TypeWidth <= InlineWidth) > + // Note: we do not need to check alignment since _Atomic(T) is always > + // appropriately-aligned in clang. > + if ((TypeWidth & (TypeWidth - 1)) == 0 && TypeWidth <= InlineWidth) > return "2"; // "always lock free" >// We cannot be certain what operations the lib calls might be >// able to implement as lock-free on future processors. > @@ -881,7 +881,6 @@ static void InitializePredefinedMacros(c > #define DEFINE_LOCK_FREE_MACRO(TYPE, Type) \ > Builder.defineMacro("__GCC_ATOMIC_" #TYPE "_LOCK_FREE", \ > getLockFreeValue(TI.get##Type##Width(), \ > - TI.get##Type##Align(), \ > InlineWidthBits)); > DEFINE_LOCK_FREE_MACRO(BOOL, Bool); > DEFINE_LOCK_FREE_MACRO(CHAR, Char); > @@ -894,7 +893,6 @@ static void InitializePredefinedMacros(c > DEFINE_LOCK_FREE_MACRO(LLONG, LongLong); > Builder.defineMacro("__GCC_ATOMIC_POINTER_LOCK_FREE", > getLockFreeValue(TI.getPointerWidth(0), > - TI.getPointerAlign(0), > InlineWidthBits)); > #undef DEFINE_LOCK_FREE_MACRO >} > > Modified: cfe/trunk/test/Sema/atomic-ops.c > URL: > http://llvm.org/viewvc/llvm-project/cfe/trunk/test/Sema/atomic-ops.c?rev=291477&r1=291476&r2=291477&view=diff > == > --- cfe/trunk/test/Sema/atomic-ops.c (original) > +++ cfe/trunk/test/Sema/atomic-ops.c Mon Jan 9 14:54:20 2017 > @@ -14,11 +14,7 @@ _Static_assert(__GCC_ATOMIC_WCHAR_T_LOCK > _Static_assert(__GCC_ATOMIC_SHORT_LOCK_FREE == 2, ""); > _Static_assert(__GCC_ATOMIC_INT_LOCK_FREE == 2, ""); > _Static_assert(__GCC_ATOMIC_LONG_LOCK_FREE == 2, ""); > -#ifdef __i386__ > -_Static_assert(__GCC_ATOMIC_LLONG_LOCK_FREE == 1, ""); > -#else > 
_Static_assert(__GCC_ATOMIC_LLONG_LOCK_FREE == 2, ""); > -#endif > _Static_assert(__GCC_ATOMIC_POINTER_LOCK_FREE == 2, ""); > > _Static_assert(__c11_atomic_is_lock_free(1), ""); > > > ___ > cfe-commits mailing list > cfe-commits@lists.llvm.org > http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits ___ cfe-commits mailing list cfe-commits@lists.llvm.org http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits
r296063 - Revert r291477 "[Frontend] Correct values of ATOMIC_*_LOCK_FREE to match builtin"
Author: hans Date: Thu Feb 23 19:16:34 2017 New Revision: 296063 URL: http://llvm.org/viewvc/llvm-project?rev=296063&view=rev Log: Revert r291477 "[Frontend] Correct values of ATOMIC_*_LOCK_FREE to match builtin" It caused PR31864. There is a patch in progress to fix that, but let's revert in the meantime. Modified: cfe/trunk/lib/Frontend/InitPreprocessor.cpp cfe/trunk/test/Sema/atomic-ops.c Modified: cfe/trunk/lib/Frontend/InitPreprocessor.cpp URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Frontend/InitPreprocessor.cpp?rev=296063&r1=296062&r2=296063&view=diff == --- cfe/trunk/lib/Frontend/InitPreprocessor.cpp (original) +++ cfe/trunk/lib/Frontend/InitPreprocessor.cpp Thu Feb 23 19:16:34 2017 @@ -286,12 +286,12 @@ static void DefineFastIntType(unsigned T /// Get the value the ATOMIC_*_LOCK_FREE macro should have for a type with /// the specified properties. -static const char *getLockFreeValue(unsigned TypeWidth, unsigned InlineWidth) { +static const char *getLockFreeValue(unsigned TypeWidth, unsigned TypeAlign, +unsigned InlineWidth) { // Fully-aligned, power-of-2 sizes no larger than the inline // width will be inlined as lock-free operations. - // Note: we do not need to check alignment since _Atomic(T) is always - // appropriately-aligned in clang. - if ((TypeWidth & (TypeWidth - 1)) == 0 && TypeWidth <= InlineWidth) + if (TypeWidth == TypeAlign && (TypeWidth & (TypeWidth - 1)) == 0 && + TypeWidth <= InlineWidth) return "2"; // "always lock free" // We cannot be certain what operations the lib calls might be // able to implement as lock-free on future processors. 
@@ -886,6 +886,7 @@ static void InitializePredefinedMacros(c #define DEFINE_LOCK_FREE_MACRO(TYPE, Type) \ Builder.defineMacro("__GCC_ATOMIC_" #TYPE "_LOCK_FREE", \ getLockFreeValue(TI.get##Type##Width(), \ + TI.get##Type##Align(), \ InlineWidthBits)); DEFINE_LOCK_FREE_MACRO(BOOL, Bool); DEFINE_LOCK_FREE_MACRO(CHAR, Char); @@ -898,6 +899,7 @@ static void InitializePredefinedMacros(c DEFINE_LOCK_FREE_MACRO(LLONG, LongLong); Builder.defineMacro("__GCC_ATOMIC_POINTER_LOCK_FREE", getLockFreeValue(TI.getPointerWidth(0), + TI.getPointerAlign(0), InlineWidthBits)); #undef DEFINE_LOCK_FREE_MACRO } Modified: cfe/trunk/test/Sema/atomic-ops.c URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/Sema/atomic-ops.c?rev=296063&r1=296062&r2=296063&view=diff == --- cfe/trunk/test/Sema/atomic-ops.c (original) +++ cfe/trunk/test/Sema/atomic-ops.c Thu Feb 23 19:16:34 2017 @@ -14,7 +14,11 @@ _Static_assert(__GCC_ATOMIC_WCHAR_T_LOCK _Static_assert(__GCC_ATOMIC_SHORT_LOCK_FREE == 2, ""); _Static_assert(__GCC_ATOMIC_INT_LOCK_FREE == 2, ""); _Static_assert(__GCC_ATOMIC_LONG_LOCK_FREE == 2, ""); +#ifdef __i386__ +_Static_assert(__GCC_ATOMIC_LLONG_LOCK_FREE == 1, ""); +#else _Static_assert(__GCC_ATOMIC_LLONG_LOCK_FREE == 2, ""); +#endif _Static_assert(__GCC_ATOMIC_POINTER_LOCK_FREE == 2, ""); _Static_assert(__c11_atomic_is_lock_free(1), ""); ___ cfe-commits mailing list cfe-commits@lists.llvm.org http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits
r291477 - [Frontend] Correct values of ATOMIC_*_LOCK_FREE to match builtin
Author: mgorny Date: Mon Jan 9 14:54:20 2017 New Revision: 291477 URL: http://llvm.org/viewvc/llvm-project?rev=291477&view=rev Log: [Frontend] Correct values of ATOMIC_*_LOCK_FREE to match builtin Correct the logic used to set ATOMIC_*_LOCK_FREE preprocessor macros not to rely on the ABI alignment of types. Instead, just assume all those types are aligned correctly by default since clang uses safe alignment for _Atomic types even if the underlying types are aligned to a lower boundary by default. For example, the 'long long' and 'double' types on x86 are aligned to 32-bit boundary by default. However, '_Atomic long long' and '_Atomic double' are aligned to 64-bit boundary, therefore satisfying the requirements of lock-free atomic operations. This fixes PR #19355 by correcting the value of __GCC_ATOMIC_LLONG_LOCK_FREE on x86, and therefore also fixing the assumption made in libc++ tests. This also fixes PR #30581 by applying a consistent logic between the functions used to implement both interfaces. Differential Revision: https://reviews.llvm.org/D28213 Modified: cfe/trunk/lib/Frontend/InitPreprocessor.cpp cfe/trunk/test/Sema/atomic-ops.c Modified: cfe/trunk/lib/Frontend/InitPreprocessor.cpp URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Frontend/InitPreprocessor.cpp?rev=291477&r1=291476&r2=291477&view=diff == --- cfe/trunk/lib/Frontend/InitPreprocessor.cpp (original) +++ cfe/trunk/lib/Frontend/InitPreprocessor.cpp Mon Jan 9 14:54:20 2017 @@ -286,12 +286,12 @@ static void DefineFastIntType(unsigned T /// Get the value the ATOMIC_*_LOCK_FREE macro should have for a type with /// the specified properties. -static const char *getLockFreeValue(unsigned TypeWidth, unsigned TypeAlign, -unsigned InlineWidth) { +static const char *getLockFreeValue(unsigned TypeWidth, unsigned InlineWidth) { // Fully-aligned, power-of-2 sizes no larger than the inline // width will be inlined as lock-free operations. 
- if (TypeWidth == TypeAlign && (TypeWidth & (TypeWidth - 1)) == 0 && - TypeWidth <= InlineWidth) + // Note: we do not need to check alignment since _Atomic(T) is always + // appropriately-aligned in clang. + if ((TypeWidth & (TypeWidth - 1)) == 0 && TypeWidth <= InlineWidth) return "2"; // "always lock free" // We cannot be certain what operations the lib calls might be // able to implement as lock-free on future processors. @@ -881,7 +881,6 @@ static void InitializePredefinedMacros(c #define DEFINE_LOCK_FREE_MACRO(TYPE, Type) \ Builder.defineMacro("__GCC_ATOMIC_" #TYPE "_LOCK_FREE", \ getLockFreeValue(TI.get##Type##Width(), \ - TI.get##Type##Align(), \ InlineWidthBits)); DEFINE_LOCK_FREE_MACRO(BOOL, Bool); DEFINE_LOCK_FREE_MACRO(CHAR, Char); @@ -894,7 +893,6 @@ static void InitializePredefinedMacros(c DEFINE_LOCK_FREE_MACRO(LLONG, LongLong); Builder.defineMacro("__GCC_ATOMIC_POINTER_LOCK_FREE", getLockFreeValue(TI.getPointerWidth(0), - TI.getPointerAlign(0), InlineWidthBits)); #undef DEFINE_LOCK_FREE_MACRO } Modified: cfe/trunk/test/Sema/atomic-ops.c URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/Sema/atomic-ops.c?rev=291477&r1=291476&r2=291477&view=diff == --- cfe/trunk/test/Sema/atomic-ops.c (original) +++ cfe/trunk/test/Sema/atomic-ops.c Mon Jan 9 14:54:20 2017 @@ -14,11 +14,7 @@ _Static_assert(__GCC_ATOMIC_WCHAR_T_LOCK _Static_assert(__GCC_ATOMIC_SHORT_LOCK_FREE == 2, ""); _Static_assert(__GCC_ATOMIC_INT_LOCK_FREE == 2, ""); _Static_assert(__GCC_ATOMIC_LONG_LOCK_FREE == 2, ""); -#ifdef __i386__ -_Static_assert(__GCC_ATOMIC_LLONG_LOCK_FREE == 1, ""); -#else _Static_assert(__GCC_ATOMIC_LLONG_LOCK_FREE == 2, ""); -#endif _Static_assert(__GCC_ATOMIC_POINTER_LOCK_FREE == 2, ""); _Static_assert(__c11_atomic_is_lock_free(1), ""); ___ cfe-commits mailing list cfe-commits@lists.llvm.org http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits