Author: Jin Huang
Date: 2025-12-19T20:57:05-08:00
New Revision: a3824de800d15545a231c4eb54b763b742eb3d40

URL: https://github.com/llvm/llvm-project/commit/a3824de800d15545a231c4eb54b763b742eb3d40
DIFF: https://github.com/llvm/llvm-project/commit/a3824de800d15545a231c4eb54b763b742eb3d40.diff

LOG: Revert "[AA] Improve precision for monotonic atomic load/store operations (#1…"

This reverts commit ce2518f59d9bc67553f05536ac9bb200827c36da.
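
For readers skimming the diff below: the functional change is which early-exit predicate gates the conservative answer in AAResults::getModRefInfo. The following is a minimal standalone sketch of the distinction, not LLVM code; the real predicates (isStrongerThan and isStrongerThanMonotonic in LLVM's AtomicOrdering.h) use a comparison table because Acquire and Release are incomparable, a subtlety that does not affect these two queries.

// Simplified model of the two gates this revert swaps; all names are
// local to the sketch, not LLVM's.
#include <cassert>

enum class Ordering {
  NotAtomic, Unordered, Monotonic, Acquire, Release, AcquireRelease, SeqCst
};

// Restored by this revert: true for Monotonic and anything stronger.
static bool strongerThanUnordered(Ordering O) {
  return O > Ordering::Unordered;
}

// Used by the reverted commit ce2518f5: true only for Acquire and stronger.
static bool strongerThanMonotonic(Ordering O) {
  return O > Ordering::Monotonic;
}

int main() {
  // Monotonic is the one ordering on which the two gates disagree.
  assert(strongerThanUnordered(Ordering::Monotonic));  // revert: early ModRef
  assert(!strongerThanMonotonic(Ordering::Monotonic)); // ce2518f5: fall through
  return 0;
}

Under ce2518f5, monotonic loads and stores fell through to the alias check and could be refined to Ref, Mod, or NoModRef; with this revert they once again return ModRef up front.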

Added: 
    llvm/test/Transforms/DeadStoreElimination/atomic-todo.ll

Modified: 
    llvm/lib/Analysis/AliasAnalysis.cpp
    llvm/test/Transforms/DeadStoreElimination/atomic.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Analysis/AliasAnalysis.cpp b/llvm/lib/Analysis/AliasAnalysis.cpp
index fd2f7c1ea9c8d..26a560252d9aa 100644
--- a/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -433,7 +433,7 @@ ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
                                     const MemoryLocation &Loc,
                                     AAQueryInfo &AAQI) {
   // Be conservative in the face of atomic.
-  if (isStrongerThanMonotonic(L->getOrdering()))
+  if (isStrongerThan(L->getOrdering(), AtomicOrdering::Unordered))
     return ModRefInfo::ModRef;
 
   // If the load address doesn't alias the given address, it doesn't read
@@ -443,13 +443,6 @@ ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
     if (AR == AliasResult::NoAlias)
       return ModRefInfo::NoModRef;
   }
-
-  assert(!isStrongerThanMonotonic(L->getOrdering()) &&
-         "Stronger atomic orderings should have been handled above!");
-
-  if (isStrongerThanUnordered(L->getOrdering()))
-    return ModRefInfo::ModRef;
-
   // Otherwise, a load just reads.
   return ModRefInfo::Ref;
 }
@@ -458,7 +451,7 @@ ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
                                     const MemoryLocation &Loc,
                                     AAQueryInfo &AAQI) {
   // Be conservative in the face of atomic.
-  if (isStrongerThanMonotonic(S->getOrdering()))
+  if (isStrongerThan(S->getOrdering(), AtomicOrdering::Unordered))
     return ModRefInfo::ModRef;
 
   if (Loc.Ptr) {
@@ -476,13 +469,7 @@ ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
       return ModRefInfo::NoModRef;
   }
 
-  assert(!isStrongerThanMonotonic(S->getOrdering()) &&
-         "Stronger atomic orderings should have been handled above!");
-
-  if (isStrongerThanUnordered(S->getOrdering()))
-    return ModRefInfo::ModRef;
-
-  // A store just writes.
+  // Otherwise, a store just writes.
   return ModRefInfo::Mod;
 }
 

diff --git a/llvm/test/Transforms/DeadStoreElimination/atomic-todo.ll b/llvm/test/Transforms/DeadStoreElimination/atomic-todo.ll
new file mode 100644
index 0000000000000..1c160442f8579
--- /dev/null
+++ b/llvm/test/Transforms/DeadStoreElimination/atomic-todo.ll
@@ -0,0 +1,23 @@
+; XFAIL: *
+; RUN: opt -passes=dse -S < %s | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
+target triple = "x86_64-apple-macosx10.7.0"
+
+; Basic correctness tests for atomic stores.
+; Note that it turns out essentially every transformation DSE does is legal on
+; atomic ops, just some transformations are not allowed across release-acquire pairs.
+
+@x = common global i32 0, align 4
+@y = common global i32 0, align 4
+
+; DSE across monotonic load (allowed as long as the eliminated store isUnordered)
+define i32 @test9() {
+; CHECK-LABEL: test9
+; CHECK-NOT: store i32 0
+; CHECK: store i32 1
+  store i32 0, ptr @x
+  %x = load atomic i32, ptr @y monotonic, align 4
+  store i32 1, ptr @x
+  ret i32 %x
+}

diff --git a/llvm/test/Transforms/DeadStoreElimination/atomic.ll b/llvm/test/Transforms/DeadStoreElimination/atomic.ll
index 41c444595fa8a..55b9384e88d93 100644
--- a/llvm/test/Transforms/DeadStoreElimination/atomic.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/atomic.ll
@@ -37,21 +37,9 @@ define void @test4() {
   ret void
 }
 
-; DSE doesn't remove monotonic store.
+; DSE unordered store overwriting non-atomic store (allowed)
 define void @test5() {
 ; CHECK-LABEL: @test5(
-; CHECK-NEXT:    store atomic i32 2, ptr @x monotonic, align 4
-; CHECK-NEXT:    store i32 1, ptr @x, align 4
-; CHECK-NEXT:    ret void
-;
-  store atomic i32 2, ptr @x monotonic, align 4
-  store i32 1, ptr @x
-  ret void
-}
-
-; DSE unordered store overwriting non-atomic store (allowed)
-define void @test6() {
-; CHECK-LABEL: @test6(
 ; CHECK-NEXT:    store atomic i32 1, ptr @x unordered, align 4
 ; CHECK-NEXT:    ret void
 ;
@@ -61,8 +49,8 @@ define void @test6() {
 }
 
 ; DSE no-op unordered atomic store (allowed)
-define void @test7() {
-; CHECK-LABEL: @test7(
+define void @test6() {
+; CHECK-LABEL: @test6(
 ; CHECK-NEXT:    ret void
 ;
   %x = load atomic i32, ptr @x unordered, align 4
@@ -72,8 +60,8 @@ define void @test7() {
 
 ; DSE seq_cst store (be conservative; DSE doesn't have infrastructure
 ; to reason about atomic operations).
-define void @test8() {
-; CHECK-LABEL: @test8(
+define void @test7() {
+; CHECK-LABEL: @test7(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    store atomic i32 0, ptr [[A]] seq_cst, align 4
 ; CHECK-NEXT:    ret void
@@ -85,8 +73,8 @@ define void @test8() {
 
 ; DSE and seq_cst load (be conservative; DSE doesn't have infrastructure
 ; to reason about atomic operations).
-define i32 @test9() {
-; CHECK-LABEL: @test9(
+define i32 @test8() {
+; CHECK-LABEL: @test8(
 ; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
 ; CHECK-NEXT:    call void @randomop(ptr [[A]])
 ; CHECK-NEXT:    store i32 0, ptr [[A]], align 4
@@ -100,40 +88,11 @@ define i32 @test9() {
   ret i32 %x
 }
 
-; DSE across monotonic load (allowed if the monotonic load's address is NoAlias)
-define i32 @test10() {
-; CHECK-LABEL: @test10(
-; CHECK-NEXT:    [[X:%.*]] = load atomic i32, ptr @y monotonic, align 4
-; CHECK-NEXT:    store i32 1, ptr @x, align 4
-; CHECK-NEXT:    ret i32 [[X]]
-;
-  store i32 0, ptr @x
-  %x = load atomic i32, ptr @y monotonic, align 4
-  store i32 1, ptr @x
-  ret i32 %x
-}
-
-; DSE across monotonic load (blocked if the atomic load's address isn't NoAlias)
-define i32 @test11(ptr %ptr) {
-; CHECK-LABEL: @test11(
-; CHECK-NEXT:    store i32 0, ptr @x, align 4
-; CHECK-NEXT:    [[X:%.*]] = load atomic i32, ptr [[PTR:%.*]] monotonic, align 4
-; CHECK-NEXT:    store i32 1, ptr @x, align 4
-; CHECK-NEXT:    ret i32 [[X]]
-;
-  store i32 0, ptr @x
-  %x = load atomic i32, ptr %ptr monotonic, align 4
-  store i32 1, ptr @x
-  ret i32 %x
-}
-
 ; DSE across monotonic store (allowed as long as the eliminated store isUnordered)
-define void @test12() {
-; CHECK-LABEL: @test12(
-; CHECK-NEXT:    store atomic i32 42, ptr @y monotonic, align 4
-; CHECK-NEXT:    store i32 1, ptr @x, align 4
-; CHECK-NEXT:    ret void
-;
+define void @test10() {
+; CHECK-LABEL: test10
+; CHECK-NOT: store i32 0
+; CHECK: store i32 1
   store i32 0, ptr @x
   store atomic i32 42, ptr @y monotonic, align 4
   store i32 1, ptr @x
@@ -141,8 +100,8 @@ define void @test12() {
 }
 
 ; DSE across monotonic load (forbidden since the eliminated store is atomic)
-define i32 @test13() {
-; CHECK-LABEL: @test13(
+define i32 @test11() {
+; CHECK-LABEL: @test11(
 ; CHECK-NEXT:    store atomic i32 0, ptr @x monotonic, align 4
 ; CHECK-NEXT:    [[X:%.*]] = load atomic i32, ptr @y monotonic, align 4
 ; CHECK-NEXT:    store atomic i32 1, ptr @x monotonic, align 4
@@ -155,8 +114,8 @@ define i32 @test13() {
 }
 
 ; DSE across monotonic store (forbidden since the eliminated store is atomic)
-define void @test14() {
-; CHECK-LABEL: @test14(
+define void @test12() {
+; CHECK-LABEL: @test12(
 ; CHECK-NEXT:    store atomic i32 0, ptr @x monotonic, align 4
 ; CHECK-NEXT:    store atomic i32 42, ptr @y monotonic, align 4
 ; CHECK-NEXT:    store atomic i32 1, ptr @x monotonic, align 4
@@ -191,7 +150,7 @@ define i32 @test15() {
 define i64 @test_atomicrmw_0() {
 ; CHECK-LABEL: @test_atomicrmw_0(
 ; CHECK-NEXT:    store i64 1, ptr @z, align 8
-; CHECK-NEXT:    [[RES:%.*]] = atomicrmw add ptr @z, i64 -1 monotonic, align 8
+; CHECK-NEXT:    [[RES:%.*]] = atomicrmw add ptr @z, i64 -1 monotonic
 ; CHECK-NEXT:    ret i64 [[RES]]
 ;
   store i64 1, ptr @z
@@ -203,7 +162,7 @@ define i64 @test_atomicrmw_0() {
 define i64 @test_atomicrmw_1() {
 ; CHECK-LABEL: @test_atomicrmw_1(
 ; CHECK-NEXT:    store i64 1, ptr @z, align 8
-; CHECK-NEXT:    [[RES:%.*]] = atomicrmw add ptr @z, i64 -1 acq_rel, align 8
+; CHECK-NEXT:    [[RES:%.*]] = atomicrmw add ptr @z, i64 -1 acq_rel
 ; CHECK-NEXT:    ret i64 [[RES]]
 ;
   store i64 1, ptr @z
@@ -214,7 +173,7 @@ define i64 @test_atomicrmw_1() {
 ; Monotonic atomicrmw should not block eliminating no-aliasing stores.
 define i64 @test_atomicrmw_2() {
 ; CHECK-LABEL: @test_atomicrmw_2(
-; CHECK-NEXT:    [[RES:%.*]] = atomicrmw add ptr @a, i64 -1 monotonic, align 8
+; CHECK-NEXT:    [[RES:%.*]] = atomicrmw add ptr @a, i64 -1 monotonic
 ; CHECK-NEXT:    store i64 2, ptr @z, align 8
 ; CHECK-NEXT:    ret i64 [[RES]]
 ;
@@ -228,7 +187,7 @@ define i64 @test_atomicrmw_2() {
 define i64 @test_atomicrmw_3() {
 ; CHECK-LABEL: @test_atomicrmw_3(
 ; CHECK-NEXT:    store i64 1, ptr @z, align 8
-; CHECK-NEXT:    [[RES:%.*]] = atomicrmw add ptr @a, i64 -1 release, align 8
+; CHECK-NEXT:    [[RES:%.*]] = atomicrmw add ptr @a, i64 -1 release
 ; CHECK-NEXT:    store i64 2, ptr @z, align 8
 ; CHECK-NEXT:    ret i64 [[RES]]
 ;
@@ -242,7 +201,7 @@ define i64 @test_atomicrmw_3() {
 define i64 @test_atomicrmw_4(ptr %ptr) {
 ; CHECK-LABEL: @test_atomicrmw_4(
 ; CHECK-NEXT:    store i64 1, ptr @z, align 8
-; CHECK-NEXT:    [[RES:%.*]] = atomicrmw add ptr [[PTR:%.*]], i64 -1 monotonic, align 8
+; CHECK-NEXT:    [[RES:%.*]] = atomicrmw add ptr [[PTR:%.*]], i64 -1 monotonic
 ; CHECK-NEXT:    store i64 2, ptr @z, align 8
 ; CHECK-NEXT:    ret i64 [[RES]]
 ;
@@ -256,7 +215,7 @@ define i64 @test_atomicrmw_4(ptr %ptr) {
 define i64 @test_atomicrmw_5() {
 ; CHECK-LABEL: @test_atomicrmw_5(
 ; CHECK-NEXT:    store i64 1, ptr @z, align 8
-; CHECK-NEXT:    [[RES:%.*]] = atomicrmw add ptr @z, i64 -1 monotonic, align 8
+; CHECK-NEXT:    [[RES:%.*]] = atomicrmw add ptr @z, i64 -1 monotonic
 ; CHECK-NEXT:    store i64 2, ptr @z, align 8
 ; CHECK-NEXT:    ret i64 [[RES]]
 ;
@@ -270,7 +229,7 @@ define i64 @test_atomicrmw_5() {
 define { i32, i1} @test_cmpxchg_1() {
 ; CHECK-LABEL: @test_cmpxchg_1(
 ; CHECK-NEXT:    store i32 1, ptr @x, align 4
-; CHECK-NEXT:    [[RET:%.*]] = cmpxchg volatile ptr @x, i32 10, i32 20 seq_cst monotonic, align 4
+; CHECK-NEXT:    [[RET:%.*]] = cmpxchg volatile ptr @x, i32 10, i32 20 seq_cst monotonic
 ; CHECK-NEXT:    store i32 2, ptr @x, align 4
 ; CHECK-NEXT:    ret { i32, i1 } [[RET]]
 ;
@@ -283,7 +242,7 @@ define { i32, i1} @test_cmpxchg_1() {
 ; Monotonic cmpxchg should not block DSE for non-aliasing stores.
 define { i32, i1} @test_cmpxchg_2() {
 ; CHECK-LABEL: @test_cmpxchg_2(
-; CHECK-NEXT:    [[RET:%.*]] = cmpxchg volatile ptr @y, i32 10, i32 20 monotonic monotonic, align 4
+; CHECK-NEXT:    [[RET:%.*]] = cmpxchg volatile ptr @y, i32 10, i32 20 monotonic monotonic
 ; CHECK-NEXT:    store i32 2, ptr @x, align 4
 ; CHECK-NEXT:    ret { i32, i1 } [[RET]]
 ;
@@ -297,7 +256,7 @@ define { i32, i1} @test_cmpxchg_2() {
 define { i32, i1} @test_cmpxchg_3() {
 ; CHECK-LABEL: @test_cmpxchg_3(
 ; CHECK-NEXT:    store i32 1, ptr @x, align 4
-; CHECK-NEXT:    [[RET:%.*]] = cmpxchg volatile ptr @y, i32 10, i32 20 seq_cst seq_cst, align 4
+; CHECK-NEXT:    [[RET:%.*]] = cmpxchg volatile ptr @y, i32 10, i32 20 seq_cst seq_cst
 ; CHECK-NEXT:    store i32 2, ptr @x, align 4
 ; CHECK-NEXT:    ret { i32, i1 } [[RET]]
 ;
@@ -311,7 +270,7 @@ define { i32, i1} @test_cmpxchg_3() {
 define { i32, i1} @test_cmpxchg_4(ptr %ptr) {
 ; CHECK-LABEL: @test_cmpxchg_4(
 ; CHECK-NEXT:    store i32 1, ptr @x, align 4
-; CHECK-NEXT:    [[RET:%.*]] = cmpxchg volatile ptr [[PTR:%.*]], i32 10, i32 20 monotonic monotonic, align 4
+; CHECK-NEXT:    [[RET:%.*]] = cmpxchg volatile ptr [[PTR:%.*]], i32 10, i32 20 monotonic monotonic
 ; CHECK-NEXT:    store i32 2, ptr @x, align 4
 ; CHECK-NEXT:    ret { i32, i1 } [[RET]]
 ;
@@ -325,7 +284,7 @@ define { i32, i1} @test_cmpxchg_4(ptr %ptr) {
 define { i32, i1} @test_cmpxchg_5(ptr %ptr) {
 ; CHECK-LABEL: @test_cmpxchg_5(
 ; CHECK-NEXT:    store i32 1, ptr @x, align 4
-; CHECK-NEXT:    [[RET:%.*]] = cmpxchg volatile ptr @x, i32 10, i32 20 monotonic monotonic, align 4
+; CHECK-NEXT:    [[RET:%.*]] = cmpxchg volatile ptr @x, i32 10, i32 20 monotonic monotonic
 ; CHECK-NEXT:    store i32 2, ptr @x, align 4
 ; CHECK-NEXT:    ret { i32, i1 } [[RET]]
 ;
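
The case this revert gives up on is the deleted @test10 above, now parked as an XFAIL'd TODO in atomic-todo.ll. As a minimal sketch (function name hypothetical):

@x = global i32 0, align 4
@y = global i32 0, align 4

define i32 @dead_store_across_monotonic_load() {
  store i32 0, ptr @x                              ; fully overwritten below
  %v = load atomic i32, ptr @y monotonic, align 4  ; NoAlias with @x
  store i32 1, ptr @x
  ret i32 %v
}

With ce2518f5 in tree, alias analysis classified the monotonic load as Ref against @x (the locations are NoAlias), so DSE deleted the first store. After this revert, getModRefInfo answers ModRef for any monotonic access before the alias check runs, and the store survives.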


        