https://github.com/melver updated 
https://github.com/llvm/llvm-project/pull/142955

From dc3e720da86ecee117604f54058036e0701026e2 Mon Sep 17 00:00:00 2001
From: Marco Elver <el...@google.com>
Date: Wed, 21 May 2025 23:49:48 +0200
Subject: [PATCH] Thread Safety Analysis: Basic capability alias-analysis

Add basic alias analysis for capabilities by reusing LocalVariableMap,
which tracks the currently valid definitions of local variables.
Aliases created through complex control flow are not tracked. This is
sufficient to address the basic concerns raised for the Linux kernel
application of the analysis [1].

For example, the analysis no longer generates false positives for cases
such as the following (and many others):

        void testNestedAccess(Container *c) {
          Foo *ptr = &c->foo;
          ptr->mu.Lock();
          c->foo.data = 42;  // OK - no false positive
          ptr->mu.Unlock();
        }

        void testNestedAcquire(Container *c) EXCLUSIVE_LOCK_FUNCTION(&c->foo.mu) {
          Foo *buf = &c->foo;
          buf->mu.Lock();  // OK - no false positive
        }

Since the analysis is now able to identify potentially unsafe patterns
it could not identify previously (see the added FIXME test case for an
example), mark alias resolution as a "beta" feature behind the flag
`-Wthread-safety-beta`.
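
For illustration, a condensed sketch of one such newly flagged pattern
(hypothetical function names; the added FIXME test case covers the full
details):

        void testConditionalRealias(Foo *ptr, bool cond) {
          if (!ptr->mu.TryLock()) {
            if (cond)
              ptr = returnsFoo(ptr);  // ptr may now alias a different object
            ptr->mu.Lock();
          }
          ptr->data = 42;  // warning: mutex not provably held on every path
          ptr->mu.Unlock();
        }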

Fixing LocalVariableMap: LocalVariableMap did not properly track
loop-invariant aliases, because its merge logic compared raw
VarDefinition IDs. The back-edge handling algorithm (in
createReferenceContext()) generates new 'reference' definitions for
loop-scoped variables, so later raw-ID comparisons invalidated aliases
at back-edge merges (in intersectBackEdge()) and at subsequent forward
merges with non-loop paths (in intersectContexts()).

Fix LocalVariableMap by adding the getCanonicalDefinitionID() helper
that resolves any definition ID down to its non-reference base. As a
result, a variable's definition is preserved across control-flow merges
as long as its underlying canonical definition remains the same.
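
For illustration, a minimal sketch of the loop-invariant aliasing this
preserves (hypothetical test; the added testNestedLoopInvariant case
exercises the same behavior):

        void testLoopInvariantAlias(Foo *f, int n) {
          Foo *ptr = f;      // alias established before the loop
          ptr->mu.Lock();
          for (int i = 0; i < n; ++i) {}  // back-edge merge keeps the alias
          f->data = 42;      // OK - ptr still resolves to f
          ptr->mu.Unlock();
        }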

Link: https://lore.kernel.org/all/CANpmjNPquO=W1JAh1FNQb8pMQjgeZAKCPQUAd7qUg=5pjJ6x=q...@mail.gmail.com/ [1]
---
 .../Analysis/Analyses/ThreadSafetyCommon.h    |  20 +-
 clang/lib/Analysis/ThreadSafety.cpp           | 147 +++++--
 clang/lib/Analysis/ThreadSafetyCommon.cpp     |  40 +-
 clang/test/Sema/warn-thread-safety-analysis.c |   6 +-
 .../SemaCXX/warn-thread-safety-analysis.cpp   | 362 +++++++++++++++++-
 5 files changed, 544 insertions(+), 31 deletions(-)

diff --git a/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h b/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h
index 6c97905a2d7f9..d20f172f446e6 100644
--- a/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h
+++ b/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h
@@ -35,6 +35,7 @@
 #include "llvm/ADT/PointerUnion.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Support/Casting.h"
+#include <functional>
 #include <sstream>
 #include <string>
 #include <utility>
@@ -386,6 +387,11 @@ class SExprBuilder {
     SelfVar->setKind(til::Variable::VK_SFun);
   }
 
+  // Create placeholder for this: we don't know the VarDecl on construction yet.
+  til::LiteralPtr *createThisPlaceholder() {
+    return new (Arena) til::LiteralPtr(nullptr);
+  }
+
   // Translate a clang expression in an attribute to a til::SExpr.
   // Constructs the context from D, DeclExp, and SelfDecl.
   CapabilityExpr translateAttrExpr(const Expr *AttrExp, const NamedDecl *D,
@@ -394,8 +400,8 @@ class SExprBuilder {
 
   CapabilityExpr translateAttrExpr(const Expr *AttrExp, CallingContext *Ctx);
 
-  // Translate a variable reference.
-  til::LiteralPtr *createVariable(const VarDecl *VD);
+  // Translate a VarDecl to its canonical TIL expression.
+  til::SExpr *translateVariable(const VarDecl *VD, CallingContext *Ctx);
 
   // Translate a clang statement or expression to a TIL expression.
   // Also performs substitution of variables; Ctx provides the context.
@@ -412,6 +418,10 @@ class SExprBuilder {
   const til::SCFG *getCFG() const { return Scfg; }
   til::SCFG *getCFG() { return Scfg; }
 
+  void setLookupLocalVarExpr(std::function<const Expr *(const NamedDecl *)> F) {
+    LookupLocalVarExpr = std::move(F);
+  }
+
 private:
   // We implement the CFGVisitor API
   friend class CFGWalker;
@@ -445,6 +455,7 @@ class SExprBuilder {
       const AbstractConditionalOperator *C, CallingContext *Ctx);
 
   til::SExpr *translateDeclStmt(const DeclStmt *S, CallingContext *Ctx);
+  til::SExpr *translateStmtExpr(const StmtExpr *SE, CallingContext *Ctx);
 
   // Map from statements in the clang CFG to SExprs in the til::SCFG.
   using StatementMap = llvm::DenseMap<const Stmt *, til::SExpr *>;
@@ -531,6 +542,11 @@ class SExprBuilder {
   std::vector<til::Phi *> IncompleteArgs;
   til::BasicBlock *CurrentBB = nullptr;
   BlockInfo *CurrentBlockInfo = nullptr;
+
+  // Recursion guard.
+  llvm::DenseSet<const ValueDecl *> VarsBeingTranslated;
+  // Context-dependent lookup of currently valid definitions of local variables.
+  std::function<const Expr *(const NamedDecl *)> LookupLocalVarExpr;
 };
 
 #ifndef NDEBUG
diff --git a/clang/lib/Analysis/ThreadSafety.cpp b/clang/lib/Analysis/ThreadSafety.cpp
index 026d0308921a5..70eae4a4dd882 100644
--- a/clang/lib/Analysis/ThreadSafety.cpp
+++ b/clang/lib/Analysis/ThreadSafety.cpp
@@ -39,6 +39,7 @@
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/ImmutableMap.h"
 #include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/ScopeExit.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/Support/Allocator.h"
@@ -537,6 +538,13 @@ class LocalVariableMap {
 protected:
   friend class VarMapBuilder;
 
+  // Resolve any definition ID down to its non-reference base ID.
+  unsigned getCanonicalDefinitionID(unsigned ID) {
+    while (ID > 0 && VarDefinitions[ID].isReference())
+      ID = VarDefinitions[ID].Ref;
+    return ID;
+  }
+
   // Get the current context index
   unsigned getContextIndex() { return SavedContexts.size()-1; }
 
@@ -621,6 +629,7 @@ class VarMapBuilder : public ConstStmtVisitor<VarMapBuilder> {
 
   void VisitDeclStmt(const DeclStmt *S);
   void VisitBinaryOperator(const BinaryOperator *BO);
+  void VisitCallExpr(const CallExpr *CE);
 };
 
 } // namespace
@@ -666,6 +675,56 @@ void VarMapBuilder::VisitBinaryOperator(const BinaryOperator *BO) {
   }
 }
 
+// Invalidates local variable definitions if variable escaped.
+void VarMapBuilder::VisitCallExpr(const CallExpr *CE) {
+  const FunctionDecl *FD = CE->getDirectCallee();
+  if (!FD)
+    return;
+
+  // Heuristic for likely-benign functions that pass by mutable reference. This
+  // is needed to avoid a slew of false positives due to mutable reference
+  // passing where the captured reference is usually passed on by-value.
+  if (const IdentifierInfo *II = FD->getIdentifier()) {
+    // Any kind of std::bind-like functions.
+    if (II->isStr("bind") || II->isStr("bind_front"))
+      return;
+  }
+
+  // Invalidate local variable definitions that are passed by non-const
+  // reference or non-const pointer.
+  for (unsigned Idx = 0; Idx < CE->getNumArgs(); ++Idx) {
+    if (Idx >= FD->getNumParams())
+      break;
+
+    const Expr *Arg = CE->getArg(Idx)->IgnoreParenImpCasts();
+    const ParmVarDecl *PVD = FD->getParamDecl(Idx);
+    QualType ParamType = PVD->getType();
+
+    // Potential reassignment if passed by non-const reference / pointer.
+    const ValueDecl *VDec = nullptr;
+    if (ParamType->isReferenceType() &&
+        !ParamType->getPointeeType().isConstQualified()) {
+      if (const auto *DRE = dyn_cast<DeclRefExpr>(Arg))
+        VDec = DRE->getDecl();
+    } else if (ParamType->isPointerType() &&
+               !ParamType->getPointeeType().isConstQualified()) {
+      Arg = Arg->IgnoreParenCasts();
+      if (const auto *UO = dyn_cast<UnaryOperator>(Arg)) {
+        if (UO->getOpcode() == UO_AddrOf) {
+          const Expr *SubE = UO->getSubExpr()->IgnoreParenCasts();
+          if (const auto *DRE = dyn_cast<DeclRefExpr>(SubE))
+            VDec = DRE->getDecl();
+        }
+      }
+    }
+
+    if (VDec && Ctx.lookup(VDec)) {
+      Ctx = VMap->clearDefinition(VDec, Ctx);
+      VMap->saveContext(CE, Ctx);
+    }
+  }
+}
+
 // Computes the intersection of two contexts.  The intersection is the
 // set of variables which have the same definition in both contexts;
 // variables with different definitions are discarded.
@@ -674,11 +733,16 @@ LocalVariableMap::intersectContexts(Context C1, Context C2) {
   Context Result = C1;
   for (const auto &P : C1) {
     const NamedDecl *Dec = P.first;
-    const unsigned *i2 = C2.lookup(Dec);
-    if (!i2)             // variable doesn't exist on second path
+    const unsigned *I2 = C2.lookup(Dec);
+    if (!I2) {
+      // The variable doesn't exist on second path.
       Result = removeDefinition(Dec, Result);
-    else if (*i2 != P.second)  // variable exists, but has different definition
+    } else if (getCanonicalDefinitionID(P.second) !=
+               getCanonicalDefinitionID(*I2)) {
+      // If the canonical definitions mismatch, the underlying definitions are
+      // different; invalidate.
       Result = clearDefinition(Dec, Result);
+    }
   }
   return Result;
 }
@@ -698,13 +762,22 @@ LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
 // createReferenceContext.
 void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
   for (const auto &P : C1) {
-    unsigned i1 = P.second;
-    VarDefinition *VDef = &VarDefinitions[i1];
+    const unsigned I1 = P.second;
+    VarDefinition *VDef = &VarDefinitions[I1];
     assert(VDef->isReference());
 
-    const unsigned *i2 = C2.lookup(P.first);
-    if (!i2 || (*i2 != i1))
-      VDef->Ref = 0;    // Mark this variable as undefined
+    const unsigned *I2 = C2.lookup(P.first);
+    if (!I2) {
+      // Variable does not exist at the end of the loop, invalidate.
+      VDef->Ref = 0;
+      continue;
+    }
+
+    // Compare the canonical IDs. This correctly handles chains of references
+    // and determines if the variable is truly loop-invariant.
+    if (getCanonicalDefinitionID(VDef->Ref) != getCanonicalDefinitionID(*I2)) {
+      VDef->Ref = 0; // Mark this variable as undefined
+    }
   }
 }
 
@@ -1196,11 +1269,10 @@ class ThreadSafetyAnalyzer {
 
   void warnIfMutexNotHeld(const FactSet &FSet, const NamedDecl *D,
                           const Expr *Exp, AccessKind AK, Expr *MutexExp,
-                          ProtectedOperationKind POK, til::LiteralPtr *Self,
+                          ProtectedOperationKind POK, til::SExpr *Self,
                           SourceLocation Loc);
   void warnIfMutexHeld(const FactSet &FSet, const NamedDecl *D, const Expr *Exp,
-                       Expr *MutexExp, til::LiteralPtr *Self,
-                       SourceLocation Loc);
+                       Expr *MutexExp, til::SExpr *Self, SourceLocation Loc);
 
   void checkAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK,
                    ProtectedOperationKind POK);
@@ -1597,6 +1669,16 @@ void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
   const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
   const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;
 
+  // Temporarily set the lookup context for SExprBuilder.
+  SxBuilder.setLookupLocalVarExpr([&](const NamedDecl *D) -> const Expr * {
+    if (!Handler.issueBetaWarnings())
+      return nullptr;
+    auto Ctx = LVarCtx;
+    return LocalVarMap.lookupExpr(D, Ctx);
+  });
+  auto Cleanup = llvm::make_scope_exit(
+      [this] { SxBuilder.setLookupLocalVarExpr(nullptr); });
+
   const auto *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate);
   if (!Exp)
     return;
@@ -1653,7 +1735,7 @@ class BuildLockset : public ConstStmtVisitor<BuildLockset> {
   }
 
   void handleCall(const Expr *Exp, const NamedDecl *D,
-                  til::LiteralPtr *Self = nullptr,
+                  til::SExpr *Self = nullptr,
                   SourceLocation Loc = SourceLocation());
   void examineArguments(const FunctionDecl *FD,
                         CallExpr::const_arg_iterator ArgBegin,
@@ -1665,7 +1747,17 @@ class BuildLockset : public ConstStmtVisitor<BuildLockset> {
                const FactSet &FunctionExitFSet)
       : ConstStmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet),
         FunctionExitFSet(FunctionExitFSet), LVarCtx(Info.EntryContext),
-        CtxIndex(Info.EntryIndex) {}
+        CtxIndex(Info.EntryIndex) {
+    Analyzer->SxBuilder.setLookupLocalVarExpr(
+        [this](const NamedDecl *D) -> const Expr * {
+          if (!Analyzer->Handler.issueBetaWarnings())
+            return nullptr;
+          auto Ctx = LVarCtx;
+          return Analyzer->LocalVarMap.lookupExpr(D, Ctx);
+        });
+  }
+
+  ~BuildLockset() { Analyzer->SxBuilder.setLookupLocalVarExpr(nullptr); }
 
   void VisitUnaryOperator(const UnaryOperator *UO);
   void VisitBinaryOperator(const BinaryOperator *BO);
@@ -1683,7 +1775,7 @@ class BuildLockset : public ConstStmtVisitor<BuildLockset> {
 /// of at least the passed in AccessKind.
 void ThreadSafetyAnalyzer::warnIfMutexNotHeld(
     const FactSet &FSet, const NamedDecl *D, const Expr *Exp, AccessKind AK,
-    Expr *MutexExp, ProtectedOperationKind POK, til::LiteralPtr *Self,
+    Expr *MutexExp, ProtectedOperationKind POK, til::SExpr *Self,
     SourceLocation Loc) {
   LockKind LK = getLockKindFromAccessKind(AK);
   CapabilityExpr Cp = SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self);
@@ -1742,8 +1834,7 @@ void ThreadSafetyAnalyzer::warnIfMutexNotHeld(
 /// Warn if the LSet contains the given lock.
 void ThreadSafetyAnalyzer::warnIfMutexHeld(const FactSet &FSet,
                                            const NamedDecl *D, const Expr *Exp,
-                                           Expr *MutexExp,
-                                           til::LiteralPtr *Self,
+                                           Expr *MutexExp, til::SExpr *Self,
                                            SourceLocation Loc) {
   CapabilityExpr Cp = SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self);
   if (Cp.isInvalid()) {
@@ -1911,7 +2002,7 @@ void ThreadSafetyAnalyzer::checkPtAccess(const FactSet &FSet, const Expr *Exp,
 ///              of an implicitly called cleanup function.
 /// \param Loc   If \p Exp = nullptr, the location.
 void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
-                              til::LiteralPtr *Self, SourceLocation Loc) {
+                              til::SExpr *Self, SourceLocation Loc) {
   CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
   CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
   CapExprSet ScopedReqsAndExcludes;
@@ -1923,7 +2014,7 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
     const auto *TagT = Exp->getType()->getAs<TagType>();
     if (D->hasAttrs() && TagT && Exp->isPRValue()) {
       til::LiteralPtr *Placeholder =
-          Analyzer->SxBuilder.createVariable(nullptr);
+          Analyzer->SxBuilder.createThisPlaceholder();
       [[maybe_unused]] auto inserted =
           Analyzer->ConstructedObjects.insert({Exp, Placeholder});
       assert(inserted.second && "Are we visiting the same expression again?");
@@ -2217,6 +2308,9 @@ void BuildLockset::examineArguments(const FunctionDecl *FD,
 }
 
 void BuildLockset::VisitCallExpr(const CallExpr *Exp) {
+  // adjust the context
+  LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, Exp, LVarCtx);
+
   if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
     const auto *ME = dyn_cast<MemberExpr>(CE->getCallee());
     // ME can be null when calling a method pointer
@@ -2604,7 +2698,8 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
       }
       if (UnderlyingLocks.empty())
         continue;
-      CapabilityExpr Cp(SxBuilder.createVariable(Param), StringRef(),
+      CapabilityExpr Cp(SxBuilder.translateVariable(Param, nullptr),
+                        StringRef(),
                         /*Neg=*/false, /*Reentrant=*/false);
       auto *ScopedEntry = FactMan.createFact<ScopedLockableFactEntry>(
           Cp, Param->getLocation(), FactEntry::Declared,
@@ -2722,17 +2817,19 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
           if (!DD->hasAttrs())
             break;
 
-          LocksetBuilder.handleCall(nullptr, DD,
-                                    SxBuilder.createVariable(AD.getVarDecl()),
-                                    AD.getTriggerStmt()->getEndLoc());
+          LocksetBuilder.handleCall(
+              nullptr, DD,
+              SxBuilder.translateVariable(AD.getVarDecl(), nullptr),
+              AD.getTriggerStmt()->getEndLoc());
           break;
         }
 
         case CFGElement::CleanupFunction: {
           const CFGCleanupFunction &CF = BI.castAs<CFGCleanupFunction>();
-          LocksetBuilder.handleCall(/*Exp=*/nullptr, CF.getFunctionDecl(),
-                                    SxBuilder.createVariable(CF.getVarDecl()),
-                                    CF.getVarDecl()->getLocation());
+          LocksetBuilder.handleCall(
+              /*Exp=*/nullptr, CF.getFunctionDecl(),
+              SxBuilder.translateVariable(CF.getVarDecl(), nullptr),
+              CF.getVarDecl()->getLocation());
           break;
         }
 
diff --git a/clang/lib/Analysis/ThreadSafetyCommon.cpp b/clang/lib/Analysis/ThreadSafetyCommon.cpp
index f560dd8ae1dd1..cf8ebc0ef8b86 100644
--- a/clang/lib/Analysis/ThreadSafetyCommon.cpp
+++ b/clang/lib/Analysis/ThreadSafetyCommon.cpp
@@ -26,6 +26,7 @@
 #include "clang/Basic/LLVM.h"
 #include "clang/Basic/OperatorKinds.h"
 #include "clang/Basic/Specifiers.h"
+#include "llvm/ADT/ScopeExit.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/StringRef.h"
 #include <algorithm>
@@ -241,7 +242,30 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
   return CapabilityExpr(E, AttrExp->getType(), Neg);
 }
 
-til::LiteralPtr *SExprBuilder::createVariable(const VarDecl *VD) {
+til::SExpr *SExprBuilder::translateVariable(const VarDecl *VD,
+                                            CallingContext *Ctx) {
+  assert(VD);
+
+  // General recursion guard for x = f(x). If we are already in the process of
+  // defining VD, use its pre-assignment value to break the cycle.
+  if (VarsBeingTranslated.contains(VD->getCanonicalDecl()))
+    return new (Arena) til::LiteralPtr(VD);
+  VarsBeingTranslated.insert(VD->getCanonicalDecl());
+  auto Cleanup = llvm::make_scope_exit(
+      [&] { VarsBeingTranslated.erase(VD->getCanonicalDecl()); });
+
+  QualType Ty = VD->getType();
+  if (!VD->isStaticLocal() && Ty->isPointerType()) {
+    // Substitute local variable aliases with a canonical definition.
+    if (LookupLocalVarExpr) {
+      // Attempt to resolve an alias through the more complex local variable map
+      // lookup. This will fail with complex control-flow graphs (where we
+      // revert to no alias resolution to retain stable variable names).
+      if (const Expr *E = LookupLocalVarExpr(VD))
+        return translate(E, Ctx);
+    }
+  }
+
   return new (Arena) til::LiteralPtr(VD);
 }
 
@@ -313,6 +337,8 @@ til::SExpr *SExprBuilder::translate(const Stmt *S, CallingContext *Ctx) {
 
   case Stmt::DeclStmtClass:
     return translateDeclStmt(cast<DeclStmt>(S), Ctx);
+  case Stmt::StmtExprClass:
+    return translateStmtExpr(cast<StmtExpr>(S), Ctx);
   default:
     break;
   }
@@ -353,6 +379,9 @@ til::SExpr *SExprBuilder::translateDeclRefExpr(const DeclRefExpr *DRE,
              : cast<ObjCMethodDecl>(D)->getCanonicalDecl()->getParamDecl(I);
   }
 
+  if (const auto *VarD = dyn_cast<VarDecl>(VD))
+    return translateVariable(VarD, Ctx);
+
   // For non-local variables, treat it as a reference to a named object.
   return new (Arena) til::LiteralPtr(VD);
 }
@@ -691,6 +720,15 @@ SExprBuilder::translateDeclStmt(const DeclStmt *S, CallingContext *Ctx) {
   return nullptr;
 }
 
+til::SExpr *SExprBuilder::translateStmtExpr(const StmtExpr *SE,
+                                            CallingContext *Ctx) {
+  // The value of a statement expression is the value of the last statement,
+  // which must be an expression.
+  const CompoundStmt *CS = SE->getSubStmt();
+  return CS->body_empty() ? new (Arena) til::Undefined(SE)
+                          : translate(CS->body_back(), Ctx);
+}
+
 // If (E) is non-trivial, then add it to the current basic block, and
 // update the statement map so that S refers to E.  Returns a new variable
 // that refers to E.
diff --git a/clang/test/Sema/warn-thread-safety-analysis.c b/clang/test/Sema/warn-thread-safety-analysis.c
index b43f97af9162e..549cb1231baa6 100644
--- a/clang/test/Sema/warn-thread-safety-analysis.c
+++ b/clang/test/Sema/warn-thread-safety-analysis.c
@@ -184,9 +184,13 @@ int main(void) {
   /// Cleanup functions
   {
     struct Mutex* const __attribute__((cleanup(unlock_scope))) scope = &mu1;
-    mutex_exclusive_lock(scope);  // Note that we have to lock through scope, because no alias analysis!
+    mutex_exclusive_lock(scope);  // Lock through scope works.
     // Cleanup happens automatically -> no warning.
   }
+  {
+    struct Mutex* const __attribute__((unused, cleanup(unlock_scope))) scope = &mu1;
+    mutex_exclusive_lock(&mu1);  // With basic alias analysis lock through mu1 also works.
+  }
 
   foo_.a_value = 0; // expected-warning {{writing variable 'a_value' requires holding mutex 'mu_' exclusively}}
   *foo_.a_ptr = 1; // expected-warning {{writing the value pointed to by 'a_ptr' requires holding mutex 'bar.other_mu' exclusively}}
diff --git a/clang/test/SemaCXX/warn-thread-safety-analysis.cpp b/clang/test/SemaCXX/warn-thread-safety-analysis.cpp
index d82e2484ace1e..5b510716404bb 100644
--- a/clang/test/SemaCXX/warn-thread-safety-analysis.cpp
+++ b/clang/test/SemaCXX/warn-thread-safety-analysis.cpp
@@ -1556,9 +1556,9 @@ void main() {
   Child *c;
   Base *b = c;
 
-  b->func1(); // expected-warning {{calling function 'func1' requires holding mutex 'b->mu_' exclusively}}
+  b->func1(); // expected-warning {{calling function 'func1' requires holding mutex 'c->mu_' exclusively}}
   b->mu_.Lock();
-  b->func2(); // expected-warning {{cannot call function 'func2' while mutex 'b->mu_' is held}}
+  b->func2(); // expected-warning {{cannot call function 'func2' while mutex 'c->mu_' is held}}
   b->mu_.Unlock();
 
   c->func1(); // expected-warning {{calling function 'func1' requires holding mutex 'c->mu_' exclusively}}
@@ -6875,6 +6875,34 @@ class PointerGuard {
 };
 } // namespace Derived_Smart_Pointer
 
+// Test for capabilities that are heap-allocated and stored in static variables.
+namespace FunctionStaticVariable {
+struct Data {
+  Mutex mu;
+  int x GUARDED_BY(mu);
+};
+
+void testStaticVariable() {
+}
+
+void testHeapAllocation() {
+  static Data *d = new Data;
+  d->mu.Lock();
+  d->x = 5;
+  d->mu.Unlock();
+}
+
+void testHeapAllocationBug() {
+  static auto *d = new Data;
+  d->x = 10; // expected-warning{{writing variable 'x' requires holding mutex 'd->mu' exclusively}}
+}
+
+void testHeapAllocationScopedLock() {
+  static Mutex *mu = new Mutex;
+  MutexLock lock(mu);
+}
+} // namespace FunctionStaticVariable
+
 namespace Reentrancy {
 
 class LOCKABLE REENTRANT_CAPABILITY ReentrantMutex {
@@ -7238,3 +7266,333 @@ class TestNegativeWithReentrantMutex {
 };
 
 } // namespace Reentrancy
+
+// Tests for tracking aliases of capabilities.
+namespace CapabilityAliases {
+struct Foo {
+  Mutex mu;
+  int data GUARDED_BY(mu);
+};
+
+Foo *returnsFoo();
+Foo *returnsFoo(Foo *foo);
+void locksRequired(Foo *foo) EXCLUSIVE_LOCKS_REQUIRED(foo->mu);
+void escapeAlias(int a, Foo *&ptr);
+void escapeAlias(int b, Foo **ptr);
+void passByConstRef(Foo* const& ptr);
+
+void testBasicPointerAlias(Foo *f) {
+  Foo *ptr = f;
+  ptr->mu.Lock();         // lock through alias
+  f->data = 42;           // access through original
+  ptr->mu.Unlock();       // unlock through alias
+}
+
+void testBasicPointerAliasNoInit(Foo *f) {
+  Foo *ptr;
+
+  ptr = nullptr;
+  ptr = f;
+  ptr->mu.Lock();
+  f->data = 42;
+  ptr->mu.Unlock();
+  ptr = nullptr;
+}
+
+void testBasicPointerAliasLoop() {
+  for (;;) {
+    Foo *f = returnsFoo();
+    Foo *ptr = f;
+    if (!ptr)
+      break;
+    ptr->mu.Lock();
+    f->data = 42;
+    ptr->mu.Unlock();
+  }
+}
+
+void testPointerAliasNoEscape1(Foo *f) {
+  Foo *ptr = f;
+  testBasicPointerAlias(ptr);  // pass alias by value
+
+  ptr->mu.Lock();
+  f->data = 42;
+  ptr->mu.Unlock();
+}
+
+void testPointerAliasNoEscape2(Foo *f) {
+  Foo *ptr = f;
+  passByConstRef(ptr);  // pass alias by const ref
+
+  ptr->mu.Lock();
+  f->data = 42;
+  ptr->mu.Unlock();
+}
+
+void testPointerAliasNoEscape3() {
+  Foo *ptr = returnsFoo();
+  ptr->mu.Lock();
+  locksRequired(ptr);
+  ptr->mu.Unlock();
+}
+
+void testPointerAliasEscape1(Foo *f) {
+  Foo *ptr = f;
+  escapeAlias(0, ptr);
+
+  ptr->mu.Lock();
+  f->data = 42;           // expected-warning{{writing variable 'data' requires holding mutex 'f->mu' exclusively}} \
+                          // expected-note{{found near match 'ptr->mu'}}
+  ptr->mu.Unlock();
+}
+
+void testPointerAliasEscape2(Foo *f) {
+  Foo *ptr = f;
+  escapeAlias(0, &ptr);
+
+  ptr->mu.Lock();
+  f->data = 42;           // expected-warning{{writing variable 'data' requires holding mutex 'f->mu' exclusively}} \
+                          // expected-note{{found near match 'ptr->mu'}}
+  ptr->mu.Unlock();
+}
+
+void testPointerAliasEscape3(Foo *f) {
+  Foo *ptr;
+
+  ptr = f;
+  escapeAlias(0, &ptr);
+
+  ptr->mu.Lock();
+  f->data = 42;           // expected-warning{{writing variable 'data' requires holding mutex 'f->mu' exclusively}} \
+                          // expected-note{{found near match 'ptr->mu'}}
+  ptr->mu.Unlock();
+}
+
+void testPointerAliasEscapeAndReset(Foo *f) {
+  Foo *ptr;
+
+  ptr = f;
+  escapeAlias(0, &ptr);
+  ptr = f;
+
+  ptr->mu.Lock();
+  f->data = 42;
+  ptr->mu.Unlock();
+}
+
+void testPointerAliasTryLock1() {
+  Foo *ptr = returnsFoo();
+  if (ptr->mu.TryLock()) {
+    locksRequired(ptr);
+    ptr->mu.Unlock();
+  }
+}
+
+void testPointerAliasTryLock2() {
+  Foo *ptr;
+  ptr = returnsFoo();
+  Foo *ptr2 = ptr;
+  if (ptr->mu.TryLock()) {
+    locksRequired(ptr);
+    ptr2->mu.Unlock();
+  }
+}
+
+// FIXME: This test demonstrates a dubious pattern that the analysis correctly
+// flags as unsafe, though the user might perceive it as a false positive. The
+// pattern combines a TryLock() failure path with a conditional reassignment of
+// the pointer being locked:
+//
+// 1.  The conditional reassignment `ptr = returnsFoo(ptr);` forces `ptr` to
+//     become a phi node in the CFG at the subsequent merge point. The alias
+//     analysis correctly tracks that `ptr` could refer to one of two distinct
+//     objects.
+//
+// 2.  The lock acquired on the `!TryLock()` path should be (conceptually)
+//     `phi(P1, P2)->mu`, while the lock on the successful path is on the
+//     original `P1->mu`. When the paths merge the analysis currently discards
+//     the alias as it cannot prove a single alias on that path.
+//
+// While this pattern is stylistically fragile and difficult to reason about, a
+// robust solution would require a more advanced symbolic representation of
+// capabilities within the analyzer. For now, we warn on such ambiguity.
+void testPointerAliasTryLockDubious(int x) {
+  Foo *ptr = returnsFoo();
+  if (!ptr->mu.TryLock()) {  // expected-note{{mutex acquired here}}
+    if (x)
+      ptr = returnsFoo(ptr); // <-- this breaks the pattern
+    ptr->mu.Lock();          // expected-note{{mutex acquired here}}
+  }
+  ptr->data = 42;            // expected-warning{{writing variable 'data' requires holding mutex 'ptr->mu' exclusively}} \
+                             // expected-warning{{mutex 'ptr->mu' is not held on every path through here}} \
+                             // expected-warning{{mutex 'returnsFoo().mu' is not held on every path through here}}
+  ptr->mu.Unlock();          // expected-warning{{releasing mutex 'ptr->mu' that was not held}}
+}
+
+void testReassignment() {
+  Foo f1, f2;
+  Foo *ptr = &f1;
+  ptr->mu.Lock();
+  f1.data = 42;
+  ptr->mu.Unlock();
+
+  ptr = &f2;
+  ptr->mu.Lock();
+  f2.data = 42;
+  f1.data = 42;           // expected-warning{{writing variable 'data' requires holding mutex 'f1.mu'}} \
+                          // expected-note{{found near match 'f2.mu'}}
+  ptr->mu.Unlock();
+}
+
+// Nested field access through pointer
+struct Container {
+  Foo foo;
+};
+
+void testNestedAccess(Container *c) {
+  Foo *ptr = &c->foo; // pointer to nested field
+  ptr->mu.Lock();
+  c->foo.data = 42;
+  ptr->mu.Unlock();
+}
+
+void testNestedAcquire(Container *c) EXCLUSIVE_LOCK_FUNCTION(&c->foo.mu) {
+  Foo *buf = &c->foo;
+  buf->mu.Lock();
+}
+
+struct ContainerOfPtr {
+  Foo *foo_ptr;
+};
+
+void testIndirectAccess(ContainerOfPtr *fc) {
+  Foo *ptr = fc->foo_ptr; // get pointer
+  ptr->mu.Lock();
+  fc->foo_ptr->data = 42; // access via original
+  ptr->mu.Unlock();
+}
+
+void testControlFlowDoWhile(Foo *f, int x) {
+  Foo *ptr = f;
+
+  f->mu.Lock();
+  if (x) {
+    // complex merge
+    do { } while (x--);
+  }
+  ptr->data = 42;
+  ptr->mu.Unlock();
+}
+
+// FIXME: No alias tracking through complex control flow.
+void testComplexControlFlow(Foo *f1, Foo *f2, bool cond) {
+  Foo *ptr;
+  if (cond) {
+    ptr = f1;
+  } else {
+    ptr = f2;
+  }
+  ptr->mu.Lock();
+  if (cond) {
+    f1->data = 42; // expected-warning{{writing variable 'data' requires holding mutex 'f1->mu' exclusively}} \
+                   // expected-note{{found near match 'ptr->mu'}}
+  } else {
+    f2->data = 42; // expected-warning{{writing variable 'data' requires holding mutex 'f2->mu' exclusively}} \
+                   // expected-note{{found near match 'ptr->mu'}}
+  }
+  ptr->mu.Unlock();
+}
+
+void testLockFunction(Foo *f) EXCLUSIVE_LOCK_FUNCTION(&f->mu) {
+  Mutex *mu = &f->mu;
+  mu->Lock();
+}
+
+void testUnlockFunction(Foo *f) UNLOCK_FUNCTION(&f->mu) {
+  Mutex *mu = &f->mu;
+  mu->Unlock();
+}
+
+// This is an idiom to deal with "pointer to returned object has a lock held,
+// but you must unlock it later" where the statement expression would be hidden
+// behind a macro.
+void lockWithinStatementExpr() {
+  Foo *f = ({ auto x = returnsFoo(); x->mu.Lock(); x; });
+  f->data = 42;
+  f->mu.Unlock();
+}
+
+// Semantically UB, but let's not crash the compiler with this (should be
+// handled by -Wuninitialized).
+void testSelfInit() {
+  Mutex *mu = mu; // don't do this at home
+  mu->Lock();
+  mu->Unlock();
+}
+
+void testSelfAssign() {
+  Foo *f = returnsFoo();
+  f = f;
+  f->mu.Lock();
+  f->data = 42;
+  f->mu.Unlock();
+}
+
+void testRecursiveAssign() {
+  Foo *f = returnsFoo();
+  f = returnsFoo(f);
+  f->mu.Lock();
+  f->data = 42;
+  f->mu.Unlock();
+}
+
+void testNestedLoopInvariant(Container *c, int n) {
+  Foo *ptr = &c->foo;
+  ptr->mu.Lock();
+
+  for (int i = 0; i < n; ++i) {
+    for (int j = 0; j < n; ++j) {
+    }
+  }
+
+  c->foo.data = 42; // ok: alias still valid
+  ptr->mu.Unlock();
+}
+
+void testLoopWithBreak(Foo *f, bool cond) {
+  Foo *ptr = f;
+  ptr->mu.Lock();
+  for (int i = 0; i < 10; ++i) {
+    if (cond) {
+      break; // merge point is after the loop
+    }
+  }
+  f->data = 42; // ok
+  ptr->mu.Unlock();
+}
+
+void testLoopWithContinue(Foo *f, bool cond) {
+  Foo *ptr = f;
+  ptr->mu.Lock();
+  for (int i = 0; i < 10; ++i) {
+    if (cond) {
+      continue; // tests merge at the top of loop.
+    }
+  }
+  f->data = 42; // ok
+  ptr->mu.Unlock();
+}
+
+void testLoopConditionalReassignment(Foo *f1, Foo *f2, bool cond) {
+  Foo *ptr = f1;
+  ptr->mu.Lock(); // expected-note{{mutex acquired here}}
+
+  for (int i = 0; i < 10; ++i) {
+    if (cond) {
+      ptr = f2; // alias is reassigned on some path inside the loop.
+    }
+  }
+  f1->data = 42;
+  ptr->mu.Unlock(); // expected-warning{{releasing mutex 'ptr->mu' that was not held}}
+} // expected-warning{{mutex 'f1->mu' is still held at the end of function}}
+}  // namespace CapabilityAliases
