https://github.com/AmrDeveloper updated https://github.com/llvm/llvm-project/pull/155486

From c97287958e5db1a598423b37ac63a940277f1988 Mon Sep 17 00:00:00 2001
From: AmrDeveloper <am...@programmer.net>
Date: Tue, 26 Aug 2025 21:34:52 +0200
Subject: [PATCH 1/2] [CIR] Complex Unary plus and minus with promoted type

---
 clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp |  12 +-
 clang/test/CIR/CodeGen/complex-unary.cpp    | 118 ++++++++++++++++++++
 2 files changed, 122 insertions(+), 8 deletions(-)
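
For context, this change makes unary + and - on a complex operand with a promoted element type (such as _Float16) evaluate at the promoted type and narrow the result back, rather than bailing out through errorNYI. A minimal C++ sketch of the source pattern the new foo9/foo10 tests below exercise (the function name `example` is illustrative only, not part of the patch):

    // Unary +/- on _Float16 _Complex: the operand is promoted to
    // float _Complex, the operator is applied at float precision, and
    // emitUnPromotedValue narrows the result back to _Float16 _Complex.
    void example() {
      _Float16 _Complex a;
      _Float16 _Complex plus  = +a; // promote -> cir.unary(plus)  -> unpromote
      _Float16 _Complex minus = -a; // promote -> cir.unary(minus) -> unpromote
    }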

diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
index bb1b55f2d16f4..a86b2d5e6ed1b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
@@ -323,10 +323,8 @@ mlir::Value ComplexExprEmitter::emitCast(CastKind ck, Expr *op,
 mlir::Value ComplexExprEmitter::VisitUnaryPlus(const UnaryOperator *e) {
   QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
   mlir::Value result = VisitPlusMinus(e, cir::UnaryOpKind::Plus, promotionTy);
-  if (!promotionTy.isNull()) {
-    cgf.cgm.errorNYI("ComplexExprEmitter::VisitUnaryPlus emitUnPromotedValue");
-    return {};
-  }
+  if (!promotionTy.isNull())
+    return cgf.emitUnPromotedValue(result, e->getSubExpr()->getType());
   return result;
 }
 
@@ -348,10 +346,8 @@ mlir::Value ComplexExprEmitter::VisitPlusMinus(const UnaryOperator *e,
 mlir::Value ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *e) {
   QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
   mlir::Value result = VisitPlusMinus(e, cir::UnaryOpKind::Minus, promotionTy);
-  if (!promotionTy.isNull()) {
-    cgf.cgm.errorNYI("ComplexExprEmitter::VisitUnaryMinus emitUnPromotedValue");
-    return {};
-  }
+  if (!promotionTy.isNull())
+    return cgf.emitUnPromotedValue(result, e->getSubExpr()->getType());
   return result;
 }
 
diff --git a/clang/test/CIR/CodeGen/complex-unary.cpp b/clang/test/CIR/CodeGen/complex-unary.cpp
index 4cd81eb40597a..81803817e0c99 100644
--- a/clang/test/CIR/CodeGen/complex-unary.cpp
+++ b/clang/test/CIR/CodeGen/complex-unary.cpp
@@ -370,3 +370,121 @@ void foo8() {
 // OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
 // OGCG: store float %[[A_REAL_MINUS]], ptr %[[B_REAL_PTR]], align 4
 // OGCG: store float %[[A_IMAG_MINUS]], ptr %[[B_IMAG_PTR]], align 4
+
+void foo9() {
+  _Float16 _Complex a;
+  _Float16 _Complex b = +a;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["b", init]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
+// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
+// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
+// CIR: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float
+// CIR: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float
+// CIR: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
+// CIR: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[A_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[RESULT_REAL_F32:.*]] = cir.unary(plus, %[[A_REAL_F32:.*]]) : !cir.float, !cir.float
+// CIR: %[[RESULT_IMAG_F32:.*]] = cir.unary(plus, %[[A_IMAG_F32:.*]]) : !cir.float, !cir.float
+// CIR: %[[RESULT_COMPLEX_F32:.*]] = cir.complex.create %[[RESULT_REAL_F32]], %[[RESULT_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
+// CIR: %[[RESULT_REAL_F32:.*]] = cir.complex.real %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[RESULT_IMAG_F32:.*]] = cir.complex.imag %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[RESULT_REAL_F16:.*]] = cir.cast(floating, %[[RESULT_REAL_F32]] : !cir.float), !cir.f16
+// CIR: %[[RESULT_IMAG_F16:.*]] = cir.cast(floating, %[[RESULT_IMAG_F32]] : !cir.float), !cir.f16
+// CIR: %[[RESULT_COMPLEX_F16:.*]] = cir.complex.create %[[RESULT_REAL_F16]], %[[RESULT_IMAG_F16]] : !cir.f16 -> !cir.complex<!cir.f16>
+// CIR: cir.store{{.*}} %[[RESULT_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { half, half }, i64 1, align 2
+// LLVM: %[[B_ADDR:.*]] = alloca { half, half }, i64 1, align 2
+// LLVM: %[[TMP_A:.*]] = load { half, half }, ptr %[[A_ADDR]], align 2
+// LLVM: %[[A_REAL:.*]] = extractvalue { half, half } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { half, half } %[[TMP_A]], 1
+// LLVM: %[[A_REAL_F32:.*]] = fpext half %[[A_REAL]] to float
+// LLVM: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float
+// LLVM: %[[TMP_A_COMPLEX_F32:.*]] = insertvalue { float, float } {{.*}}, float %[[A_REAL_F32]], 0
+// LLVM: %[[A_COMPLEX_F32:.*]] = insertvalue { float, float } %[[TMP_A_COMPLEX_F32]], float %[[A_IMAG_F32]], 1
+// LLVM: %[[TMP_A_COMPLEX_F32:.*]] = insertvalue { float, float } {{.*}}, float %[[A_REAL_F32]], 0
+// LLVM: %[[A_COMPLEX_F32:.*]] = insertvalue { float, float } %[[TMP_A_COMPLEX_F32]], float %[[A_IMAG_F32]], 1
+// LLVM: %[[A_REAL_F16:.*]] = fptrunc float %[[A_REAL_F32]] to half
+// LLVM: %[[A_IMAG_F16:.*]] = fptrunc float %[[A_IMAG_F32]] to half
+// LLVM: %[[TMP_RESULT_COMPLEX_F16:.*]] = insertvalue { half, half } {{.*}}, half %[[A_REAL_F16]], 0
+// LLVM: %[[RESULT_COMPLEX_F16:.*]] = insertvalue { half, half } %[[TMP_RESULT_COMPLEX_F16]], half %[[A_IMAG_F16]], 1
+// LLVM: store { half, half } %[[RESULT_COMPLEX_F16]], ptr %[[B_ADDR]], align 2
+
+// OGCG: %[[A_ADDR:.*]] = alloca { half, half }, align 2
+// OGCG: %[[B_ADDR:.*]] = alloca { half, half }, align 2
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load half, ptr %[[A_REAL_PTR]], align 2
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load half, ptr %[[A_IMAG_PTR]], align 2
+// OGCG: %[[A_REAL_F32:.*]] = fpext half %[[A_REAL]] to float
+// OGCG: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float
+// OGCG: %[[RESULT_REAL:.*]] = fptrunc float %[[A_REAL_F32]] to half
+// OGCG: %[[RESULT_IMAG:.*]] = fptrunc float %[[A_IMAG_F32]] to half
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: store half %[[RESULT_REAL]], ptr %[[B_REAL_PTR]], align 2
+// OGCG: store half %[[RESULT_IMAG]], ptr %[[B_IMAG_PTR]], align 2
+
+void foo10() {
+  _Float16 _Complex a;
+  _Float16 _Complex b = -a;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["b", init]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
+// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
+// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
+// CIR: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float
+// CIR: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float
+// CIR: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
+// CIR: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[A_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[RESULT_REAL_F32:.*]] = cir.unary(minus, %[[A_REAL_F32:.*]]) : !cir.float, !cir.float
+// CIR: %[[RESULT_IMAG_F32:.*]] = cir.unary(minus, %[[A_IMAG_F32:.*]]) : !cir.float, !cir.float
+// CIR: %[[RESULT_COMPLEX_F32:.*]] = cir.complex.create %[[RESULT_REAL_F32]], %[[RESULT_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
+// CIR: %[[RESULT_REAL_F32:.*]] = cir.complex.real %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[RESULT_IMAG_F32:.*]] = cir.complex.imag %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[RESULT_REAL_F16:.*]] = cir.cast(floating, %[[RESULT_REAL_F32]] : !cir.float), !cir.f16
+// CIR: %[[RESULT_IMAG_F16:.*]] = cir.cast(floating, %[[RESULT_IMAG_F32]] : !cir.float), !cir.f16
+// CIR: %[[RESULT_COMPLEX_F16:.*]] = cir.complex.create %[[RESULT_REAL_F16]], %[[RESULT_IMAG_F16]] : !cir.f16 -> !cir.complex<!cir.f16>
+// CIR: cir.store{{.*}} %[[RESULT_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { half, half }, i64 1, align 2
+// LLVM: %[[B_ADDR:.*]] = alloca { half, half }, i64 1, align 2
+// LLVM: %[[TMP_A:.*]] = load { half, half }, ptr %[[A_ADDR]], align 2
+// LLVM: %[[A_REAL:.*]] = extractvalue { half, half } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { half, half } %[[TMP_A]], 1
+// LLVM: %[[A_REAL_F32:.*]] = fpext half %[[A_REAL]] to float
+// LLVM: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float
+// LLVM: %[[TMP_A_COMPLEX_F32:.*]] = insertvalue { float, float } {{.*}}, float %[[A_REAL_F32]], 0
+// LLVM: %[[A_COMPLEX_F32:.*]] = insertvalue { float, float } %[[TMP_A_COMPLEX_F32]], float %[[A_IMAG_F32]], 1
+// LLVM: %[[RESULT_REAL_F32:.*]] = fneg float %[[A_REAL_F32]]
+// LLVM: %[[RESULT_IMAG_F32:.*]] = fneg float %[[A_IMAG_F32]]
+// LLVM: %[[TMP_A_COMPLEX_F32:.*]] = insertvalue { float, float } {{.*}}, float %[[RESULT_REAL_F32]], 0
+// LLVM: %[[A_COMPLEX_F32:.*]] = insertvalue { float, float } %[[TMP_A_COMPLEX_F32]], float %[[RESULT_IMAG_F32]], 1
+// LLVM: %[[A_REAL_F16:.*]] = fptrunc float %[[RESULT_REAL_F32]] to half
+// LLVM: %[[A_IMAG_F16:.*]] = fptrunc float %[[RESULT_IMAG_F32]] to half
+// LLVM: %[[TMP_RESULT_COMPLEX_F16:.*]] = insertvalue { half, half } {{.*}}, half %[[A_REAL_F16]], 0
+// LLVM: %[[RESULT_COMPLEX_F16:.*]] = insertvalue { half, half } %[[TMP_RESULT_COMPLEX_F16]], half %[[A_IMAG_F16]], 1
+// LLVM: store { half, half } %[[RESULT_COMPLEX_F16]], ptr %[[B_ADDR]], align 2
+
+// OGCG: %[[A_ADDR:.*]] = alloca { half, half }, align 2
+// OGCG: %[[B_ADDR:.*]] = alloca { half, half }, align 2
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load half, ptr %[[A_REAL_PTR]], align 2
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load half, ptr %[[A_IMAG_PTR]], align 2
+// OGCG: %[[A_REAL_F32:.*]] = fpext half %[[A_REAL]] to float
+// OGCG: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float
+// OGCG: %[[RESULT_REAL_F32:.*]] = fneg float %[[A_REAL_F32]]
+// OGCG: %[[RESULT_IMAG_F32:.*]] = fneg float %[[A_IMAG_F32]]
+// OGCG: %[[RESULT_REAL:.*]] = fptrunc float %[[RESULT_REAL_F32]] to half
+// OGCG: %[[RESULT_IMAG:.*]] = fptrunc float %[[RESULT_IMAG_F32]] to half
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: store half %[[RESULT_REAL]], ptr %[[B_REAL_PTR]], align 2
+// OGCG: store half %[[RESULT_IMAG]], ptr %[[B_IMAG_PTR]], align 2

From 87c29f181c1706aa9ddf91a2a0eb96e555896b78 Mon Sep 17 00:00:00 2001
From: AmrDeveloper <am...@programmer.net>
Date: Wed, 27 Aug 2025 18:47:39 +0200
Subject: [PATCH 2/2] Add tests for CIR before LoweringPrepare

---
 clang/test/CIR/CodeGen/complex-unary.cpp | 93 ++++++++++++++----------
 1 file changed, 55 insertions(+), 38 deletions(-)
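
A note on the new check prefixes (my reading of the hunks below): CIR-BEFORE matches the ClangIR emitted before the LoweringPrepare pass, where the promotion is still a pair of float_complex casts around a single cir.unary on the whole complex value, while CIR-AFTER matches the decomposed per-component form after the pass. The RUN lines are outside the hunk context, so the invocation below is only an assumed sketch of the usual shape of such tests, not the file's actual RUN lines:

    // Assumed RUN-line shape; the real RUN lines are not visible in this diff.
    // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir -mmlir \
    // RUN:   --mlir-print-ir-before=cir-lowering-prepare %s -o %t 2>&1 \
    // RUN:   | FileCheck --check-prefix=CIR-BEFORE %s
    // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
    // RUN: FileCheck --input-file=%t.cir %s --check-prefix=CIR-AFTER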

diff --git a/clang/test/CIR/CodeGen/complex-unary.cpp b/clang/test/CIR/CodeGen/complex-unary.cpp
index 81803817e0c99..e945d9b09f613 100644
--- a/clang/test/CIR/CodeGen/complex-unary.cpp
+++ b/clang/test/CIR/CodeGen/complex-unary.cpp
@@ -376,25 +376,34 @@ void foo9() {
   _Float16 _Complex b = +a;
 }
 
-// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["a"]
-// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["b", init]
-// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
-// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
-// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
-// CIR: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float
-// CIR: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float
-// CIR: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
-// CIR: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
-// CIR: %[[A_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
-// CIR: %[[RESULT_REAL_F32:.*]] = cir.unary(plus, %[[A_REAL_F32:.*]]) : !cir.float, !cir.float
-// CIR: %[[RESULT_IMAG_F32:.*]] = cir.unary(plus, %[[A_IMAG_F32:.*]]) : !cir.float, !cir.float
-// CIR: %[[RESULT_COMPLEX_F32:.*]] = cir.complex.create %[[RESULT_REAL_F32]], %[[RESULT_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
-// CIR: %[[RESULT_REAL_F32:.*]] = cir.complex.real %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
-// CIR: %[[RESULT_IMAG_F32:.*]] = cir.complex.imag %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
-// CIR: %[[RESULT_REAL_F16:.*]] = cir.cast(floating, %[[RESULT_REAL_F32]] : !cir.float), !cir.f16
-// CIR: %[[RESULT_IMAG_F16:.*]] = cir.cast(floating, %[[RESULT_IMAG_F32]] : !cir.float), !cir.f16
-// CIR: %[[RESULT_COMPLEX_F16:.*]] = cir.complex.create %[[RESULT_REAL_F16]], %[[RESULT_IMAG_F16]] : !cir.f16 -> !cir.complex<!cir.f16>
-// CIR: cir.store{{.*}} %[[RESULT_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>
+
+// CIR-BEFORE: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["a"]
+// CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["b", init]
+// CIR-BEFORE: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
+// CIR-BEFORE: %[[A_COMPLEX_F32:.*]] = cir.cast(float_complex, %[[TMP_A]] : !cir.complex<!cir.f16>), !cir.complex<!cir.float>
+// CIR-BEFORE: %[[RESULT:.*]] = cir.unary(plus, %[[A_COMPLEX_F32]]) : !cir.complex<!cir.float>, !cir.complex<!cir.float>
+// CIR-BEFORE: %[[A_COMPLEX_F16:.*]] = cir.cast(float_complex, %[[RESULT]] : !cir.complex<!cir.float>), !cir.complex<!cir.f16>
+// CIR-BEFORE: cir.store{{.*}} %[[A_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>
+
+// CIR-AFTER: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["a"]
+// CIR-AFTER: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["b", init]
+// CIR-AFTER: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
+// CIR-AFTER: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
+// CIR-AFTER: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
+// CIR-AFTER: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float
+// CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float
+// CIR-AFTER: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[RESULT_REAL_F32:.*]] = cir.unary(plus, %[[A_REAL_F32]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[RESULT_IMAG_F32:.*]] = cir.unary(plus, %[[A_IMAG_F32]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[RESULT_COMPLEX_F32:.*]] = cir.complex.create %[[RESULT_REAL_F32]], %[[RESULT_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER: %[[RESULT_REAL_F32:.*]] = cir.complex.real %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[RESULT_IMAG_F32:.*]] = cir.complex.imag %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[RESULT_REAL_F16:.*]] = cir.cast(floating, %[[RESULT_REAL_F32]] : !cir.float), !cir.f16
+// CIR-AFTER: %[[RESULT_IMAG_F16:.*]] = cir.cast(floating, %[[RESULT_IMAG_F32]] : !cir.float), !cir.f16
+// CIR-AFTER: %[[RESULT_COMPLEX_F16:.*]] = cir.complex.create %[[RESULT_REAL_F16]], %[[RESULT_IMAG_F16]] : !cir.f16 -> !cir.complex<!cir.f16>
+// CIR-AFTER: cir.store{{.*}} %[[RESULT_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>
 
 // LLVM: %[[A_ADDR:.*]] = alloca { half, half }, i64 1, align 2
 // LLVM: %[[B_ADDR:.*]] = alloca { half, half }, i64 1, align 2
@@ -433,25 +442,33 @@ void foo10() {
   _Float16 _Complex b = -a;
 }
 
-// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["a"]
-// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["b", init]
-// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
-// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
-// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
-// CIR: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float
-// CIR: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float
-// CIR: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
-// CIR: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
-// CIR: %[[A_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
-// CIR: %[[RESULT_REAL_F32:.*]] = cir.unary(minus, %[[A_REAL_F32:.*]]) : !cir.float, !cir.float
-// CIR: %[[RESULT_IMAG_F32:.*]] = cir.unary(minus, %[[A_IMAG_F32:.*]]) : !cir.float, !cir.float
-// CIR: %[[RESULT_COMPLEX_F32:.*]] = cir.complex.create %[[RESULT_REAL_F32]], %[[RESULT_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
-// CIR: %[[RESULT_REAL_F32:.*]] = cir.complex.real %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
-// CIR: %[[RESULT_IMAG_F32:.*]] = cir.complex.imag %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
-// CIR: %[[RESULT_REAL_F16:.*]] = cir.cast(floating, %[[RESULT_REAL_F32]] : !cir.float), !cir.f16
-// CIR: %[[RESULT_IMAG_F16:.*]] = cir.cast(floating, %[[RESULT_IMAG_F32]] : !cir.float), !cir.f16
-// CIR: %[[RESULT_COMPLEX_F16:.*]] = cir.complex.create %[[RESULT_REAL_F16]], %[[RESULT_IMAG_F16]] : !cir.f16 -> !cir.complex<!cir.f16>
-// CIR: cir.store{{.*}} %[[RESULT_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>
+// CIR-BEFORE: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["a"]
+// CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["b", init]
+// CIR-BEFORE: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
+// CIR-BEFORE: %[[A_COMPLEX_F32:.*]] = cir.cast(float_complex, %[[TMP_A]] : !cir.complex<!cir.f16>), !cir.complex<!cir.float>
+// CIR-BEFORE: %[[RESULT:.*]] = cir.unary(minus, %[[A_COMPLEX_F32]]) : !cir.complex<!cir.float>, !cir.complex<!cir.float>
+// CIR-BEFORE: %[[A_COMPLEX_F16:.*]] = cir.cast(float_complex, %[[RESULT]] : !cir.complex<!cir.float>), !cir.complex<!cir.f16>
+// CIR-BEFORE: cir.store{{.*}} %[[A_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>
+
+// CIR-AFTER: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["a"]
+// CIR-AFTER: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["b", init]
+// CIR-AFTER: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
+// CIR-AFTER: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
+// CIR-AFTER: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
+// CIR-AFTER: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float
+// CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float
+// CIR-AFTER: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[A_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[RESULT_REAL_F32:.*]] = cir.unary(minus, %[[A_REAL_F32]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[RESULT_IMAG_F32:.*]] = cir.unary(minus, %[[A_IMAG_F32]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[RESULT_COMPLEX_F32:.*]] = cir.complex.create %[[RESULT_REAL_F32]], %[[RESULT_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER: %[[RESULT_REAL_F32:.*]] = cir.complex.real %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[RESULT_IMAG_F32:.*]] = cir.complex.imag %[[RESULT_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[RESULT_REAL_F16:.*]] = cir.cast(floating, %[[RESULT_REAL_F32]] : !cir.float), !cir.f16
+// CIR-AFTER: %[[RESULT_IMAG_F16:.*]] = cir.cast(floating, %[[RESULT_IMAG_F32]] : !cir.float), !cir.f16
+// CIR-AFTER: %[[RESULT_COMPLEX_F16:.*]] = cir.complex.create %[[RESULT_REAL_F16]], %[[RESULT_IMAG_F16]] : !cir.f16 -> !cir.complex<!cir.f16>
+// CIR-AFTER: cir.store{{.*}} %[[RESULT_COMPLEX_F16]], %[[B_ADDR]] : !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>
 
 // LLVM: %[[A_ADDR:.*]] = alloca { half, half }, i64 1, align 2
 // LLVM: %[[B_ADDR:.*]] = alloca { half, half }, i64 1, align 2
