Author: Amr Hesham
Date: 2025-11-12T17:19:27+01:00
New Revision: f240a73e8368896eefce92689cbee16eb0efdfc9

URL: 
https://github.com/llvm/llvm-project/commit/f240a73e8368896eefce92689cbee16eb0efdfc9
DIFF: 
https://github.com/llvm/llvm-project/commit/f240a73e8368896eefce92689cbee16eb0efdfc9.diff

LOG: [CIR] Upstream Load/Store Complex with volatile qualifier (#167216)

Upstream support for Load/Store ops for Complex values with the volatile qualifier
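
As a quick illustration, the source pattern this change covers (drawn from the
load_store_volatile test added below; the standalone function name here is just
a sketch) is a plain assignment between volatile-qualified _Complex lvalues:

    // Both the read of `b` and the write to `a` must be emitted as volatile:
    // CIR now produces `cir.load volatile` / `cir.store volatile`, which lower
    // to volatile LLVM `load` / `store` instructions.
    void copy_volatile_complex() {
      volatile double _Complex a;
      volatile double _Complex b;
      a = b; // volatile load of b, then volatile store into a
    }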

Added: 
    

Modified: 
    clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
    clang/test/CIR/CodeGen/complex-compound-assignment.cpp
    clang/test/CIR/CodeGen/complex.cpp

Removed: 
    


################################################################################
diff  --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
index 047f3599eed03..9ed920085c8c6 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
@@ -339,7 +339,7 @@ mlir::Value ComplexExprEmitter::emitLoadOfLValue(LValue lv,
     cgf.cgm.errorNYI(loc, "emitLoadOfLValue with Atomic LV");
 
   const Address srcAddr = lv.getAddress();
-  return builder.createLoad(cgf.getLoc(loc), srcAddr);
+  return builder.createLoad(cgf.getLoc(loc), srcAddr, lv.isVolatileQualified());
 }
 
 /// EmitStoreOfComplex - Store the specified real/imag parts into the
@@ -353,7 +353,7 @@ void ComplexExprEmitter::emitStoreOfComplex(mlir::Location loc, mlir::Value val,
   }
 
   const Address destAddr = lv.getAddress();
-  builder.createStore(loc, val, destAddr);
+  builder.createStore(loc, val, destAddr, lv.isVolatileQualified());
 }
 
 //===----------------------------------------------------------------------===//

diff  --git a/clang/test/CIR/CodeGen/complex-compound-assignment.cpp b/clang/test/CIR/CodeGen/complex-compound-assignment.cpp
index a5070f51fad63..f2dbb3cc76ad2 100644
--- a/clang/test/CIR/CodeGen/complex-compound-assignment.cpp
+++ b/clang/test/CIR/CodeGen/complex-compound-assignment.cpp
@@ -237,18 +237,18 @@ void foo4() {
 // CXX_CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["a"]
 // CXX_CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["b"]
 // CXX_CIR: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["c", init]
-// CXX_CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
-// CXX_CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CXX_CIR: %[[TMP_A:.*]] = cir.load volatile {{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CXX_CIR: %[[TMP_B:.*]] = cir.load volatile {{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
 // CXX_CIR: %[[RESULT:.*]] = cir.complex.add %[[TMP_B]], %[[TMP_A]] : !cir.complex<!s32i>
-// CXX_CIR: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
-// CXX_CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CXX_CIR: cir.store volatile {{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
+// CXX_CIR: %[[TMP_B:.*]] = cir.load volatile {{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
 // CXX_CIR: cir.store{{.*}} %[[TMP_B]], %[[C_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>
 
 // CXX_LLVM: %[[A_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
 // CXX_LLVM: %[[B_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
 // CXX_LLVM: %[[C_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
-// CXX_LLVM: %[[TMP_A:.*]] = load { i32, i32 }, ptr %[[A_ADDR]], align 4
-// CXX_LLVM: %[[TMP_B:.*]] = load { i32, i32 }, ptr %[[B_ADDR]], align 4
+// CXX_LLVM: %[[TMP_A:.*]] = load volatile { i32, i32 }, ptr %[[A_ADDR]], align 4
+// CXX_LLVM: %[[TMP_B:.*]] = load volatile { i32, i32 }, ptr %[[B_ADDR]], align 4
 // CXX_LLVM: %[[B_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 0
 // CXX_LLVM: %[[B_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 1
 // CXX_LLVM: %[[A_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 0
@@ -257,8 +257,8 @@ void foo4() {
 // CXX_LLVM: %[[ADD_IMAG:.*]] = add i32 %[[B_IMAG]], %[[A_IMAG]]
 // CXX_LLVM: %[[TMP_RESULT:.*]] = insertvalue { i32, i32 } poison, i32 %[[ADD_REAL]], 0
 // CXX_LLVM: %[[RESULT:.*]] = insertvalue { i32, i32 } %[[TMP_RESULT]], i32 %[[ADD_IMAG]], 1
-// CXX_LLVM: store { i32, i32 } %[[RESULT]], ptr %[[B_ADDR]], align 4
-// CXX_LLVM: %[[TMP_B:.*]] = load { i32, i32 }, ptr %[[B_ADDR]], align 4
+// CXX_LLVM: store volatile { i32, i32 } %[[RESULT]], ptr %[[B_ADDR]], align 4
+// CXX_LLVM: %[[TMP_B:.*]] = load volatile { i32, i32 }, ptr %[[B_ADDR]], align 4
 // CXX_LLVM: store { i32, i32 } %[[TMP_B]], ptr %[[C_ADDR]], align 4
 
 // CXX_OGCG: %[[A_ADDR:.*]] = alloca { i32, i32 }, align 4

diff  --git a/clang/test/CIR/CodeGen/complex.cpp b/clang/test/CIR/CodeGen/complex.cpp
index 4eab3999dfc42..82c9f2d7aaf26 100644
--- a/clang/test/CIR/CodeGen/complex.cpp
+++ b/clang/test/CIR/CodeGen/complex.cpp
@@ -1534,3 +1534,146 @@ void imag_literal_gnu_extension() {
 // OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 1
 // OGCG: store i32 0, ptr %[[C_REAL_PTR]], align 4
 // OGCG: store i32 3, ptr %[[C_IMAG_PTR]], align 4
+
+void load_store_volatile() {
+  volatile double _Complex a;
+  volatile double _Complex b;
+  a = b;
+
+  volatile int _Complex c;
+  volatile int _Complex d;
+  c = d;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.double>, !cir.ptr<!cir.complex<!cir.double>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.double>, !cir.ptr<!cir.complex<!cir.double>>, ["b"]
+// CIR: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["c"]
+// CIR: %[[D_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["d"]
+// CIR: %[[TMP_B:.*]] = cir.load volatile {{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.double>>, !cir.complex<!cir.double>
+// CIR: cir.store volatile {{.*}} %[[TMP_B]], %[[A_ADDR]] : !cir.complex<!cir.double>, !cir.ptr<!cir.complex<!cir.double>>
+// CIR: %[[TMP_D:.*]] = cir.load volatile {{.*}} %[[D_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: cir.store volatile {{.*}} %[[TMP_D]], %[[C_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { double, double }, i64 1, align 8
+// LLVM: %[[B_ADDR:.*]] = alloca { double, double }, i64 1, align 8
+// LLVM: %[[C_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[D_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[TMP_B:.*]] = load volatile { double, double }, ptr %[[B_ADDR]], align 8
+// LLVM: store volatile { double, double } %[[TMP_B]], ptr %[[A_ADDR]], align 8
+// LLVM: %[[TMP_D:.*]] = load volatile { i32, i32 }, ptr %[[D_ADDR]], align 4
+// LLVM: store volatile { i32, i32 } %[[TMP_D]], ptr %[[C_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { double, double }, align 8
+// OGCG: %[[B_ADDR:.*]] = alloca { double, double }, align 8
+// OGCG: %[[C_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[D_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_REAL:.*]] = load volatile double, ptr %[[B_REAL_PTR]], align 8
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: %[[B_IMAG:.*]] = load volatile double, ptr %[[B_IMAG_PTR]], align 8
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: store volatile double %[[B_REAL]], ptr %[[A_REAL_PTR]], align 8
+// OGCG: store volatile double %[[B_IMAG]], ptr %[[A_IMAG_PTR]], align 8
+// OGCG: %[[D_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[D_ADDR]], i32 0, i32 0
+// OGCG: %[[D_REAL:.*]] = load volatile i32, ptr %[[D_REAL_PTR]], align 4
+// OGCG: %[[D_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[D_ADDR]], i32 0, i32 1
+// OGCG: %[[D_IMAG:.*]] = load volatile i32, ptr %[[D_IMAG_PTR]], align 4
+// OGCG: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 0
+// OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 1
+// OGCG: store volatile i32 %[[D_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG: store volatile i32 %[[D_IMAG]], ptr %[[C_IMAG_PTR]], align 4
+
+
+void load_store_volatile_2() {
+  volatile double _Complex av;
+  double _Complex a;
+  av = a;
+
+  double _Complex b;
+  volatile double _Complex bv;
+  b = bv;
+
+  int _Complex c;
+  volatile int _Complex cv;
+  c = cv;
+
+  volatile int _Complex dv;
+  int _Complex d;
+  dv = d;
+}
+
+// CIR: %[[AV_ADDR:.*]] = cir.alloca !cir.complex<!cir.double>, !cir.ptr<!cir.complex<!cir.double>>, ["av"]
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.double>, !cir.ptr<!cir.complex<!cir.double>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.double>, !cir.ptr<!cir.complex<!cir.double>>, ["b"]
+// CIR: %[[BV_ADDR:.*]] = cir.alloca !cir.complex<!cir.double>, !cir.ptr<!cir.complex<!cir.double>>, ["bv"]
+// CIR: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["c"]
+// CIR: %[[CV_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["cv"]
+// CIR: %[[DV_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["dv"]
+// CIR: %[[D_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["d"]
+// CIR: %[[TMP_A:.*]] = cir.load {{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.double>>, !cir.complex<!cir.double>
+// CIR: cir.store volatile {{.*}} %[[TMP_A]], %[[AV_ADDR]] : !cir.complex<!cir.double>, !cir.ptr<!cir.complex<!cir.double>>
+// CIR: %[[TMP_BV:.*]] = cir.load volatile {{.*}} %[[BV_ADDR]] : !cir.ptr<!cir.complex<!cir.double>>, !cir.complex<!cir.double>
+// CIR: cir.store {{.*}} %[[TMP_BV]], %[[B_ADDR]] : !cir.complex<!cir.double>, !cir.ptr<!cir.complex<!cir.double>>
+// CIR: %[[TMP_CV:.*]] = cir.load volatile {{.*}} %[[CV_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: cir.store {{.*}} %[[TMP_CV]], %[[C_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
+// CIR: %[[TMP_D:.*]] = cir.load {{.*}} %[[D_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: cir.store volatile {{.*}} %[[TMP_D]], %[[DV_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
+
+// LLVM: %[[AV_ADDR:.*]] = alloca { double, double }, i64 1, align 8
+// LLVM: %[[A_ADDR:.*]] = alloca { double, double }, i64 1, align 8
+// LLVM: %[[B_ADDR:.*]] = alloca { double, double }, i64 1, align 8
+// LLVM: %[[BV_ADDR:.*]] = alloca { double, double }, i64 1, align 8
+// LLVM: %[[C_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[CV_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[DV_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[D_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { double, double }, ptr %[[A_ADDR]], align 8
+// LLVM: store volatile { double, double } %[[TMP_A]], ptr %[[AV_ADDR]], align 8
+// LLVM: %[[TMP_BV:.*]] = load volatile { double, double }, ptr %[[BV_ADDR]], align 8
+// LLVM: store { double, double } %[[TMP_BV]], ptr %[[B_ADDR]], align 8
+// LLVM: %[[TMP_CV:.*]] = load volatile { i32, i32 }, ptr %[[CV_ADDR]], align 4
+// LLVM: store { i32, i32 } %[[TMP_CV]], ptr %[[C_ADDR]], align 4
+// LLVM: %[[TMP_D:.*]] = load { i32, i32 }, ptr %[[D_ADDR]], align 4
+// LLVM: store volatile { i32, i32 } %[[TMP_D]], ptr %[[DV_ADDR]], align 4
+
+// OGCG: %[[AV_ADDR:.*]] = alloca { double, double }, align 8
+// OGCG: %[[A_ADDR:.*]] = alloca { double, double }, align 8
+// OGCG: %[[B_ADDR:.*]] = alloca { double, double }, align 8
+// OGCG: %[[BV_ADDR:.*]] = alloca { double, double }, align 8
+// OGCG: %[[C_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[CV_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[DV_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[D_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load double, ptr %[[A_REAL_PTR]], align 8
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load double, ptr %[[A_IMAG_PTR]], align 8
+// OGCG: %[[AV_REAL_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[AV_ADDR]], i32 0, i32 0
+// OGCG: %[[AV_IMAG_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[AV_ADDR]], i32 0, i32 1
+// OGCG: store volatile double %[[A_REAL]], ptr %[[AV_REAL_PTR]], align 8
+// OGCG: store volatile double %[[A_IMAG]], ptr %[[AV_IMAG_PTR]], align 8
+// OGCG: %[[BV_REAL_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[BV_ADDR]], i32 0, i32 0
+// OGCG: %[[BV_REAL:.*]] = load volatile double, ptr %[[BV_REAL_PTR]], align 8
+// OGCG: %[[BV_IMAG_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[BV_ADDR]], i32 0, i32 1
+// OGCG: %[[BV_IMAG:.*]] = load volatile double, ptr %[[BV_IMAG_PTR]], align 8
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: store double %[[BV_REAL]], ptr %[[B_REAL_PTR]], align 8
+// OGCG: store double %[[BV_IMAG]], ptr %[[B_IMAG_PTR]], align 8
+// OGCG: %[[CV_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[CV_ADDR]], i32 0, i32 0
+// OGCG: %[[CV_REAL:.*]] = load volatile i32, ptr %[[CV_REAL_PTR]], align 4
+// OGCG: %[[CV_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[CV_ADDR]], i32 0, i32 1
+// OGCG: %[[CV_IMAG:.*]] = load volatile i32, ptr %[[CV_IMAG_PTR]], align 4
+// OGCG: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 0
+// OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 1
+// OGCG: store i32 %[[CV_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG: store i32 %[[CV_IMAG]], ptr %[[C_IMAG_PTR]], align 4
+// OGCG: %[[D_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[D_ADDR]], i32 0, i32 0
+// OGCG: %[[D_REAL:.*]] = load i32, ptr %[[D_REAL_PTR]], align 4
+// OGCG: %[[D_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[D_ADDR]], i32 0, i32 1
+// OGCG: %[[D_IMAG:.*]] = load i32, ptr %[[D_IMAG_PTR]], align 4
+// OGCG: %[[DV_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[DV_ADDR]], i32 0, i32 0
+// OGCG: %[[DV_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[DV_ADDR]], i32 0, i32 1
+// OGCG: store volatile i32 %[[D_REAL]], ptr %[[DV_REAL_PTR]], align 4
+// OGCG: store volatile i32 %[[D_IMAG]], ptr %[[DV_IMAG_PTR]], align 4


        