Hi, I'm finally coming back to this issue.
On Sunday 26 August 2012 12:49:27 Julian Seward wrote:
> > I'm not sure what can be done then, to avoid a helgrind warning.
>
> If you can guarantee that "// some calculation goes here" touches only
> thread-local state, then there is only a race on sharedInt itself. In
> which case, use VALGRIND_HG_{DISABLE,ENABLE}_CHECKING to disable reporting
> on the relevant specific instances of the sharedInt.
This seems to be what I need.
VALGRIND_HG_DISABLE_CHECKING(&_q_value, sizeof(_q_value));
in loadAcquire silences the warning.
Surprisingly, VALGRIND_HG_ENABLE_CHECKING doesn't appear to work, though.
All races are suppressed, even obvious races that warn if disable+enable was
never used before. Testcase attached, see the call to oops(). In my tests,
ENABLE_CHECKING basically behaves like DISABLE_CHECKING (for instance, if you
simply put an ENABLE_CHECKING at the beginning of loadAcquire and nothing else,
then there is no warning at all anymore).
> ----------
>
> My understanding of this is that it is in violation of the C++11 memory
> model, which says that "the implementation" may deliver stores from one
> core to another in any order, in the absence of any C++11 mandated inter-
> thread synchronisation.
>
> You can argue that for x86 the hardware's TSO guarantees make this
> harmless ...
>
> > Marc Mutz said "
> > The standard says it's racy, but the implementation of
>
> ... but AIUI, "the implementation" also includes the compiler, and I
> believe it has been observed that GCC will indeed break your code in
> unexpected ways, in some cases. In short you need to prove that not
> only the hardware won't reorder stores between cores -- which for x86
> happens to be true -- but also the compiler won't.
Yes but AFAIU that's what the "volatile" does -- prevent the compiler from
reordering.
However valgrind can't possibly find out that "volatile" was used, if all that
does is disable compiler optimizations, so I agree that this cannot all work
out of the box, valgrind-specific markup is definitely needed in the Qt atomics
class. Current version of my patch attached -- no re-ENABLE for now, since it
doesn't work anyway ;)
--
David Faure, fa...@kde.org, http://www.davidfaure.fr
Working on KDE, in particular KDE Frameworks 5
#include <pthread.h>
#include <stdio.h>
#include </d/other/inst/include/valgrind/helgrind.h>
// Minimal stand-in for Qt's generic atomic operations (qgenericatomic.h).
// Both operations go through a volatile-qualified access: this stops the
// compiler from caching or reordering the load/store, but emits no hardware
// memory barrier — ordering across cores relies on the target's (x86 TSO)
// guarantees, as discussed in the thread above.
template <typename T> struct QAtomicOps // qgenericatomic.h
{
// "Acquire" load: read _q_value through a volatile pointer.
static inline
T loadAcquire(const T &_q_value)
{
return *static_cast<const volatile T *>(&_q_value);
}
// "Release" store: write newValue through a volatile pointer.
static inline
void storeRelease(T &_q_value, T newValue)
{
*static_cast<volatile T *>(&_q_value) = newValue;
}
};
// Minimal stand-in for Qt's QBasicAtomicInt (qbasicatomic.h), instrumented
// with helgrind client requests to suppress the expected report on the
// atomic word itself.  NOTE(review): the DISABLE/ENABLE pair around
// loadAcquire() is deliberate — the email above reports that the
// re-ENABLE appears to suppress ALL subsequent races on this address,
// which is the bug this testcase demonstrates.
class QBasicAtomicInt // qbasicatomic.h
{
public:
typedef QAtomicOps<int> Ops;
int _q_value; // the shared word; races on it are intentionally tolerated
int loadAcquire() const {
VALGRIND_HG_DISABLE_CHECKING(&_q_value, sizeof(_q_value));
const int ret = Ops::loadAcquire(_q_value);
VALGRIND_HG_ENABLE_CHECKING(&_q_value, sizeof(_q_value));
return ret;
}
void storeRelease(int newValue) { Ops::storeRelease(_q_value, newValue); }
// Plain unsynchronized write: a real data race that helgrind SHOULD
// still report after checking has been re-enabled above.
void oops() { _q_value = 63; }
};
// Modelled after qt_metatype_id()
static int onDemandNumber() {
static QBasicAtomicInt sharedInt = { 0 };
if (!sharedInt.loadAcquire()) {
// some calculation goes here
sharedInt.storeRelease(41);
}
sharedInt.oops(); // ####### surely this should warn!
return sharedInt.loadAcquire();
}
// Thread entry point: exercises onDemandNumber() repeatedly so that the
// two threads started from main() race on the shared atomic.
void * threadStart(void *)
{
    for (int iteration = 0; iteration < 6; ++iteration)
        printf("%d\n", onDemandNumber());
    return 0;
}
// Spawn two identical worker threads and wait for both; returns nonzero
// if either thread could not be created.
int main( int argc, char** argv ) {
    (void)argc;
    (void)argv;
    pthread_t workers[2];
    for (pthread_t &worker : workers) {
        if ( pthread_create(&worker, 0, threadStart, 0) )
            return 1;
    }
    void* result;
    for (pthread_t &worker : workers)
        pthread_join(worker, &result);
    return 0;
}
commit 605d6ce526063105cba69c1cddfbb9fd7833a47e
Author: David Faure <fa...@kde.org>
Date: Tue Oct 23 22:19:39 2012 +0200
Use helgrind macros to silence its warnings in QBasicAtomicInt/Pointer
Change-Id: I5134106e8ac0e5d124226b563bb892d725723ba4
diff --git a/src/corelib/thread/qbasicatomic.h b/src/corelib/thread/qbasicatomic.h
index 3e9c72b..b3d5a55 100644
--- a/src/corelib/thread/qbasicatomic.h
+++ b/src/corelib/thread/qbasicatomic.h
@@ -98,6 +98,19 @@
# error "Qt has not been ported to this platform"
#endif
+#if defined(Q_OS_LINUX) || defined(Q_OS_MAC)
+#define QTCORE_USE_HELGRIND
+#else
+#undef QTCORE_USE_HELGRIND
+#endif
+
+#ifdef QTCORE_USE_HELGRIND
+#include "helgrind_p.h"
+#define QT_HG_DISABLE_CHECKING(start, len) VALGRIND_HG_DISABLE_CHECKING(start, len)
+#else
+#define QT_HG_DISABLE_CHECKING(start, len)
+#endif
+
// Only include if the implementation has been ported to QAtomicOps
#ifndef QOLDBASICATOMIC_H
@@ -138,14 +151,26 @@ public:
// Atomic API, implemented in qatomic_XXX.h
- T loadAcquire() const Q_DECL_NOTHROW { return Ops::loadAcquire(_q_value); }
- void storeRelease(T newValue) Q_DECL_NOTHROW { Ops::storeRelease(_q_value, newValue); }
+ T loadAcquire() const Q_DECL_NOTHROW {
+ QT_HG_DISABLE_CHECKING(&_q_value, sizeof(_q_value));
+ return Ops::loadAcquire(_q_value);
+ }
+ void storeRelease(T newValue) Q_DECL_NOTHROW {
+ QT_HG_DISABLE_CHECKING(&_q_value, sizeof(_q_value));
+ Ops::storeRelease(_q_value, newValue);
+ }
static Q_DECL_CONSTEXPR bool isReferenceCountingNative() Q_DECL_NOTHROW { return Ops::isReferenceCountingNative(); }
static Q_DECL_CONSTEXPR bool isReferenceCountingWaitFree() Q_DECL_NOTHROW { return Ops::isReferenceCountingWaitFree(); }
- bool ref() Q_DECL_NOTHROW { return Ops::ref(_q_value); }
- bool deref() Q_DECL_NOTHROW { return Ops::deref(_q_value); }
+ bool ref() Q_DECL_NOTHROW {
+ QT_HG_DISABLE_CHECKING(&_q_value, sizeof(_q_value));
+ return Ops::ref(_q_value);
+ }
+ bool deref() Q_DECL_NOTHROW {
+ QT_HG_DISABLE_CHECKING(&_q_value, sizeof(_q_value));
+ return Ops::deref(_q_value);
+ }
static Q_DECL_CONSTEXPR bool isTestAndSetNative() Q_DECL_NOTHROW { return Ops::isTestAndSetNative(); }
static Q_DECL_CONSTEXPR bool isTestAndSetWaitFree() Q_DECL_NOTHROW { return Ops::isTestAndSetWaitFree(); }
@@ -208,8 +233,14 @@ public:
void store(Type newValue) Q_DECL_NOTHROW { _q_value = newValue; }
// Atomic API, implemented in qatomic_XXX.h
- Type loadAcquire() const Q_DECL_NOTHROW { return Ops::loadAcquire(_q_value); }
- void storeRelease(Type newValue) Q_DECL_NOTHROW { Ops::storeRelease(_q_value, newValue); }
+ Type loadAcquire() const Q_DECL_NOTHROW {
+ QT_HG_DISABLE_CHECKING(&_q_value, sizeof(_q_value));
+ return Ops::loadAcquire(_q_value);
+ }
+ void storeRelease(Type newValue) Q_DECL_NOTHROW {
+ QT_HG_DISABLE_CHECKING(&_q_value, sizeof(_q_value));
+ Ops::storeRelease(_q_value, newValue);
+ }
static Q_DECL_CONSTEXPR bool isTestAndSetNative() Q_DECL_NOTHROW { return Ops::isTestAndSetNative(); }
static Q_DECL_CONSTEXPR bool isTestAndSetWaitFree() Q_DECL_NOTHROW { return Ops::isTestAndSetWaitFree(); }
diff --git a/src/corelib/thread/helgrind_p.h b/src/corelib/thread/helgrind_p.h
new file mode 100644
[....]
diff --git a/src/corelib/thread/valgrind_p.h b/src/corelib/thread/valgrind_p.h
new file mode 100644
[....]
------------------------------------------------------------------------------
Everyone hates slow websites. So do we.
Make your web apps faster with AppDynamics
Download AppDynamics Lite for free today:
http://p.sf.net/sfu/appdyn_sfd2d_oct
_______________________________________________
Valgrind-users mailing list
Valgrind-users@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/valgrind-users