https://github.com/adams381 created https://github.com/llvm/llvm-project/pull/168662

This patch adds support for thread-local storage (TLS) variables in ClangIR,
including code generation, lowering to LLVM IR, and comprehensive testing; a
short illustrative example of the enabled constructs appears below.

Changes include:
- Added a CIR_TLSModel enum with the four TLS models (GeneralDynamic,
LocalDynamic, InitialExec, LocalExec) to CIROps.td
- Extended GlobalOp with an optional tls_model attribute
- Extended GetGlobalOp with a thread_local unit attribute
- Added verification that a thread_local GetGlobalOp references a global with
tls_model set
- Implemented GetDefaultCIRTLSModel() and setTLSMode() in CIRGenModule
- Updated getAddrOfGlobalVar() to handle TLS access
- Removed MissingFeatures assertions for TLS operations
- Added lowering of thread_local GetGlobalOp to the llvm.threadlocal.address
intrinsic
- Added lowering of GlobalOp with tls_model to LLVM thread_local globals
- Added a comprehensive test with CIR, LLVM, and OGCG checks

Known limitations (matching the incubator):
- Static local TLS variables are not yet implemented
- TLS_Dynamic with wrapper functions is not yet implemented

This fixes issue #153270
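
As a rough illustration (not taken from the PR's test file), these are the
kinds of declarations the new support targets, with the TLS model either left
to the target default or selected explicitly:

  // Illustrative C++ only; the variable names are made up.
  thread_local int tls_counter = 0;  // default TLS model
  __thread int tls_cache __attribute__((tls_model("initial-exec")));

  int bump() { return ++tls_counter; }

Per the summary above, such globals become cir.global operations carrying a
tls_model attribute and are accessed through thread_local cir.get_global,
which the direct lowering turns into llvm.threadlocal.address calls on LLVM
thread_local globals.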

>From 7abb2167def4dfceeb96c4b231a94fd8308c6e08 Mon Sep 17 00:00:00 2001
From: Adam Smith <[email protected]>
Date: Wed, 5 Nov 2025 13:04:40 -0800
Subject: [PATCH 01/12] [CIR] Upstream overflow builtins

This implements the overflow-checking arithmetic builtins:
__builtin_{add,sub,mul}_overflow and their type-specific signed/unsigned
variants (__builtin_sadd_overflow, __builtin_umull_overflow, and so on).
---
 .../CIR/Dialect/Builder/CIRBaseBuilder.h      |  14 +
 clang/include/clang/CIR/Dialect/IR/CIROps.td  |  76 ++++
 clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp       | 194 ++++++++++
 .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 112 ++++++
 clang/test/CIR/CodeGen/builtins-overflow.cpp  | 364 ++++++++++++++++++
 5 files changed, 760 insertions(+)
 create mode 100644 clang/test/CIR/CodeGen/builtins-overflow.cpp

diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
index 3288f5b12c77e..6c1951714ba1f 100644
--- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
+++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
@@ -408,6 +408,20 @@ class CIRBaseBuilderTy : public mlir::OpBuilder {
                            callee.getFunctionType().getReturnType(), operands);
   }
 
+  struct BinOpOverflowResults {
+    mlir::Value result;
+    mlir::Value overflow;
+  };
+
+  BinOpOverflowResults createBinOpOverflowOp(mlir::Location loc,
+                                             cir::IntType resultTy,
+                                             cir::BinOpOverflowKind kind,
+                                             mlir::Value lhs, mlir::Value rhs) {
+    auto op =
+        cir::BinOpOverflowOp::create(*this, loc, resultTy, kind, lhs, rhs);
+    return {op.getResult(), op.getOverflow()};
+  }
+
   
//===--------------------------------------------------------------------===//
   // Cast/Conversion Operators
   
//===--------------------------------------------------------------------===//
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index dc56db1bbd4ea..328880d6f3581 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -1628,6 +1628,82 @@ def CIR_CmpOp : CIR_Op<"cmp", [Pure, SameTypeOperands]> {
   let isLLVMLoweringRecursive = true;
 }
 
+//===----------------------------------------------------------------------===//
+// BinOpOverflowOp
+//===----------------------------------------------------------------------===//
+
+def CIR_BinOpOverflowKind : CIR_I32EnumAttr<
+  "BinOpOverflowKind", "checked binary arithmetic operation kind", [
+    I32EnumAttrCase<"Add", 0, "add">,
+    I32EnumAttrCase<"Sub", 1, "sub">,
+    I32EnumAttrCase<"Mul", 2, "mul">
+]>;
+
+def CIR_BinOpOverflowOp : CIR_Op<"binop.overflow", [Pure, SameTypeOperands]> {
+  let summary = "Perform binary integral arithmetic with overflow checking";
+  let description = [{
+    `cir.binop.overflow` performs binary arithmetic operations with overflow
+    checking on integral operands.
+
+    The `kind` argument specifies the kind of arithmetic operation to perform.
+    It can be either `add`, `sub`, or `mul`. The `lhs` and `rhs` arguments
+    specify the input operands of the arithmetic operation. The types of `lhs`
+    and `rhs` must be the same.
+
+    `cir.binop.overflow` produces two SSA values. `result` is the result of the
+    arithmetic operation truncated to its specified type. `overflow` is a
+    boolean value indicating whether overflow happens during the operation.
+
+    The exact semantic of this operation is as follows:
+
+      - `lhs` and `rhs` are promoted to an imaginary integral type that has
+        infinite precision.
+      - The arithmetic operation is performed on the promoted operands.
+      - The infinite-precision result is truncated to the type of `result`. The
+        truncated result is assigned to `result`.
+      - If the truncated result is equal to the un-truncated result, `overflow`
+        is assigned to false. Otherwise, `overflow` is assigned to true.
+  }];
+
+  let arguments = (ins
+    CIR_BinOpOverflowKind:$kind,
+    CIR_IntType:$lhs,
+    CIR_IntType:$rhs
+  );
+
+  let results = (outs CIR_IntType:$result, CIR_BoolType:$overflow);
+
+  let assemblyFormat = [{
+    `(` $kind `,` $lhs `,` $rhs `)` `:` type($lhs) `,`
+    `(` type($result) `,` type($overflow) `)`
+    attr-dict
+  }];
+
+  let builders = [
+    OpBuilder<(ins "cir::IntType":$resultTy,
+                   "cir::BinOpOverflowKind":$kind,
+                   "mlir::Value":$lhs,
+                   "mlir::Value":$rhs), [{
+      auto overflowTy = cir::BoolType::get($_builder.getContext());
+      build($_builder, $_state, resultTy, overflowTy, kind, lhs, rhs);
+    }]>
+  ];
+
+  let extraLLVMLoweringPatternDecl = [{
+    static std::string getLLVMIntrinName(cir::BinOpOverflowKind opKind,
+                                         bool isSigned, unsigned width);
+
+    struct EncompassedTypeInfo {
+      bool sign;
+      unsigned width;
+    };
+
+    static EncompassedTypeInfo computeEncompassedTypeWidth(cir::IntType operandTy,
+                                                           cir::IntType resultTy);
+  }];
+}
+
+
 
//===----------------------------------------------------------------------===//
 // BinOp
 
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index d9b9e3b877b50..19ce15ca5aeeb 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -58,6 +58,52 @@ static RValue emitBuiltinBitOp(CIRGenFunction &cgf, const CallExpr *e,
   return RValue::get(result);
 }
 
+namespace {
+struct WidthAndSignedness {
+  unsigned Width;
+  bool Signed;
+};
+} // namespace
+
+static WidthAndSignedness
+getIntegerWidthAndSignedness(const clang::ASTContext &astContext,
+                             const clang::QualType Type) {
+  assert(Type->isIntegerType() && "Given type is not an integer.");
+  unsigned Width = Type->isBooleanType()  ? 1
+                   : Type->isBitIntType() ? astContext.getIntWidth(Type)
+                                          : astContext.getTypeInfo(Type).Width;
+  bool Signed = Type->isSignedIntegerType();
+  return {Width, Signed};
+}
+
+// Given one or more integer types, this function produces an integer type that
+// encompasses them: any value in one of the given types could be expressed in
+// the encompassing type.
+static struct WidthAndSignedness
+EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
+  assert(Types.size() > 0 && "Empty list of types.");
+
+  // If any of the given types is signed, we must return a signed type.
+  bool Signed = false;
+  for (const auto &Type : Types) {
+    Signed |= Type.Signed;
+  }
+
+  // The encompassing type must have a width greater than or equal to the width
+  // of the specified types.  Additionally, if the encompassing type is signed,
+  // its width must be strictly greater than the width of any unsigned types
+  // given.
+  unsigned Width = 0;
+  for (const auto &Type : Types) {
+    unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
+    if (Width < MinWidth) {
+      Width = MinWidth;
+    }
+  }
+
+  return {Width, Signed};
+}
+
 RValue CIRGenFunction::emitRotate(const CallExpr *e, bool isRotateLeft) {
   mlir::Value input = emitScalarExpr(e->getArg(0));
   mlir::Value amount = emitScalarExpr(e->getArg(1));
@@ -491,6 +537,154 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
     cir::PrefetchOp::create(builder, loc, address, locality, isWrite);
     return RValue::get(nullptr);
   }
+  case Builtin::BI__builtin_add_overflow:
+  case Builtin::BI__builtin_sub_overflow:
+  case Builtin::BI__builtin_mul_overflow: {
+    const clang::Expr *LeftArg = e->getArg(0);
+    const clang::Expr *RightArg = e->getArg(1);
+    const clang::Expr *ResultArg = e->getArg(2);
+
+    clang::QualType ResultQTy =
+        ResultArg->getType()->castAs<clang::PointerType>()->getPointeeType();
+
+    WidthAndSignedness LeftInfo =
+        getIntegerWidthAndSignedness(cgm.getASTContext(), LeftArg->getType());
+    WidthAndSignedness RightInfo =
+        getIntegerWidthAndSignedness(cgm.getASTContext(), RightArg->getType());
+    WidthAndSignedness ResultInfo =
+        getIntegerWidthAndSignedness(cgm.getASTContext(), ResultQTy);
+
+    // Note we compute the encompassing type with the consideration to the
+    // result type, so later in LLVM lowering we don't get redundant integral
+    // extension casts.
+    WidthAndSignedness EncompassingInfo =
+        EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
+
+    auto EncompassingCIRTy = cir::IntType::get(
+        &getMLIRContext(), EncompassingInfo.Width, EncompassingInfo.Signed);
+    auto ResultCIRTy = mlir::cast<cir::IntType>(cgm.convertType(ResultQTy));
+
+    mlir::Value Left = emitScalarExpr(LeftArg);
+    mlir::Value Right = emitScalarExpr(RightArg);
+    Address ResultPtr = emitPointerWithAlignment(ResultArg);
+
+    // Extend each operand to the encompassing type, if necessary.
+    if (Left.getType() != EncompassingCIRTy)
+      Left =
+          builder.createCast(cir::CastKind::integral, Left, EncompassingCIRTy);
+    if (Right.getType() != EncompassingCIRTy)
+      Right =
+          builder.createCast(cir::CastKind::integral, Right, EncompassingCIRTy);
+
+    // Perform the operation on the extended values.
+    cir::BinOpOverflowKind OpKind;
+    switch (builtinID) {
+    default:
+      llvm_unreachable("Unknown overflow builtin id.");
+    case Builtin::BI__builtin_add_overflow:
+      OpKind = cir::BinOpOverflowKind::Add;
+      break;
+    case Builtin::BI__builtin_sub_overflow:
+      OpKind = cir::BinOpOverflowKind::Sub;
+      break;
+    case Builtin::BI__builtin_mul_overflow:
+      OpKind = cir::BinOpOverflowKind::Mul;
+      break;
+    }
+
+    auto Loc = getLoc(e->getSourceRange());
+    auto ArithResult =
+        builder.createBinOpOverflowOp(Loc, ResultCIRTy, OpKind, Left, Right);
+
+    // Here is a slight difference from the original clang CodeGen:
+    //   - In the original clang CodeGen, the checked arithmetic result is
+    //     first computed as a value of the encompassing type, and then it is
+    //     truncated to the actual result type with a second overflow checking.
+    //   - In CIRGen, the checked arithmetic operation directly produce the
+    //     checked arithmetic result in its expected type.
+    //
+    // So we don't need a truncation and a second overflow checking here.
+
+    // Finally, store the result using the pointer.
+    bool isVolatile =
+        ResultArg->getType()->getPointeeType().isVolatileQualified();
+    builder.createStore(Loc, emitToMemory(ArithResult.result, ResultQTy),
+                        ResultPtr, isVolatile);
+
+    return RValue::get(ArithResult.overflow);
+  }
+
+  case Builtin::BI__builtin_uadd_overflow:
+  case Builtin::BI__builtin_uaddl_overflow:
+  case Builtin::BI__builtin_uaddll_overflow:
+  case Builtin::BI__builtin_usub_overflow:
+  case Builtin::BI__builtin_usubl_overflow:
+  case Builtin::BI__builtin_usubll_overflow:
+  case Builtin::BI__builtin_umul_overflow:
+  case Builtin::BI__builtin_umull_overflow:
+  case Builtin::BI__builtin_umulll_overflow:
+  case Builtin::BI__builtin_sadd_overflow:
+  case Builtin::BI__builtin_saddl_overflow:
+  case Builtin::BI__builtin_saddll_overflow:
+  case Builtin::BI__builtin_ssub_overflow:
+  case Builtin::BI__builtin_ssubl_overflow:
+  case Builtin::BI__builtin_ssubll_overflow:
+  case Builtin::BI__builtin_smul_overflow:
+  case Builtin::BI__builtin_smull_overflow:
+  case Builtin::BI__builtin_smulll_overflow: {
+    // Scalarize our inputs.
+    mlir::Value X = emitScalarExpr(e->getArg(0));
+    mlir::Value Y = emitScalarExpr(e->getArg(1));
+
+    const clang::Expr *ResultArg = e->getArg(2);
+    Address ResultPtr = emitPointerWithAlignment(ResultArg);
+
+    // Decide which of the arithmetic operation we are lowering to:
+    cir::BinOpOverflowKind ArithKind;
+    switch (builtinID) {
+    default:
+      llvm_unreachable("Unknown overflow builtin id.");
+    case Builtin::BI__builtin_uadd_overflow:
+    case Builtin::BI__builtin_uaddl_overflow:
+    case Builtin::BI__builtin_uaddll_overflow:
+    case Builtin::BI__builtin_sadd_overflow:
+    case Builtin::BI__builtin_saddl_overflow:
+    case Builtin::BI__builtin_saddll_overflow:
+      ArithKind = cir::BinOpOverflowKind::Add;
+      break;
+    case Builtin::BI__builtin_usub_overflow:
+    case Builtin::BI__builtin_usubl_overflow:
+    case Builtin::BI__builtin_usubll_overflow:
+    case Builtin::BI__builtin_ssub_overflow:
+    case Builtin::BI__builtin_ssubl_overflow:
+    case Builtin::BI__builtin_ssubll_overflow:
+      ArithKind = cir::BinOpOverflowKind::Sub;
+      break;
+    case Builtin::BI__builtin_umul_overflow:
+    case Builtin::BI__builtin_umull_overflow:
+    case Builtin::BI__builtin_umulll_overflow:
+    case Builtin::BI__builtin_smul_overflow:
+    case Builtin::BI__builtin_smull_overflow:
+    case Builtin::BI__builtin_smulll_overflow:
+      ArithKind = cir::BinOpOverflowKind::Mul;
+      break;
+    }
+
+    clang::QualType ResultQTy =
+        ResultArg->getType()->castAs<clang::PointerType>()->getPointeeType();
+    auto ResultCIRTy = mlir::cast<cir::IntType>(cgm.convertType(ResultQTy));
+
+    auto Loc = getLoc(e->getSourceRange());
+    auto ArithResult =
+        builder.createBinOpOverflowOp(Loc, ResultCIRTy, ArithKind, X, Y);
+
+    bool isVolatile =
+        ResultArg->getType()->getPointeeType().isVolatileQualified();
+    builder.createStore(Loc, emitToMemory(ArithResult.result, ResultQTy),
+                        ResultPtr, isVolatile);
+
+    return RValue::get(ArithResult.overflow);
+  }
   }
 
   // If this is an alias for a lib function (e.g. __builtin_sin), emit
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index d94108294a9a3..c81f7cc657137 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -2503,6 +2503,118 @@ mlir::LogicalResult CIRToLLVMCmpOpLowering::matchAndRewrite(
   return cmpOp.emitError() << "unsupported type for CmpOp: " << type;
 }
 
+mlir::LogicalResult CIRToLLVMBinOpOverflowOpLowering::matchAndRewrite(
+    cir::BinOpOverflowOp op, OpAdaptor adaptor,
+    mlir::ConversionPatternRewriter &rewriter) const {
+  auto loc = op.getLoc();
+  auto arithKind = op.getKind();
+  auto operandTy = op.getLhs().getType();
+  auto resultTy = op.getResult().getType();
+
+  auto encompassedTyInfo = computeEncompassedTypeWidth(operandTy, resultTy);
+  auto encompassedLLVMTy = rewriter.getIntegerType(encompassedTyInfo.width);
+
+  auto lhs = adaptor.getLhs();
+  auto rhs = adaptor.getRhs();
+  if (operandTy.getWidth() < encompassedTyInfo.width) {
+    if (operandTy.isSigned()) {
+      lhs = rewriter.create<mlir::LLVM::SExtOp>(loc, encompassedLLVMTy, lhs);
+      rhs = rewriter.create<mlir::LLVM::SExtOp>(loc, encompassedLLVMTy, rhs);
+    } else {
+      lhs = rewriter.create<mlir::LLVM::ZExtOp>(loc, encompassedLLVMTy, lhs);
+      rhs = rewriter.create<mlir::LLVM::ZExtOp>(loc, encompassedLLVMTy, rhs);
+    }
+  }
+
+  auto intrinName = getLLVMIntrinName(arithKind, encompassedTyInfo.sign,
+                                      encompassedTyInfo.width);
+  auto intrinNameAttr = mlir::StringAttr::get(op.getContext(), intrinName);
+
+  auto overflowLLVMTy = rewriter.getI1Type();
+  auto intrinRetTy = mlir::LLVM::LLVMStructType::getLiteral(
+      rewriter.getContext(), {encompassedLLVMTy, overflowLLVMTy});
+
+  auto callLLVMIntrinOp = rewriter.create<mlir::LLVM::CallIntrinsicOp>(
+      loc, intrinRetTy, intrinNameAttr, mlir::ValueRange{lhs, rhs});
+  auto intrinRet = callLLVMIntrinOp.getResult(0);
+
+  auto result = rewriter
+                    .create<mlir::LLVM::ExtractValueOp>(loc, intrinRet,
+                                                        ArrayRef<int64_t>{0})
+                    .getResult();
+  auto overflow = rewriter
+                      .create<mlir::LLVM::ExtractValueOp>(loc, intrinRet,
+                                                          ArrayRef<int64_t>{1})
+                      .getResult();
+
+  if (resultTy.getWidth() < encompassedTyInfo.width) {
+    auto resultLLVMTy = getTypeConverter()->convertType(resultTy);
+    auto truncResult =
+        rewriter.create<mlir::LLVM::TruncOp>(loc, resultLLVMTy, result);
+
+    // Extend the truncated result back to the encompassing type to check for
+    // any overflows during the truncation.
+    mlir::Value truncResultExt;
+    if (resultTy.isSigned())
+      truncResultExt = rewriter.create<mlir::LLVM::SExtOp>(
+          loc, encompassedLLVMTy, truncResult);
+    else
+      truncResultExt = rewriter.create<mlir::LLVM::ZExtOp>(
+          loc, encompassedLLVMTy, truncResult);
+    auto truncOverflow = rewriter.create<mlir::LLVM::ICmpOp>(
+        loc, mlir::LLVM::ICmpPredicate::ne, truncResultExt, result);
+
+    result = truncResult;
+    overflow = rewriter.create<mlir::LLVM::OrOp>(loc, overflow, truncOverflow);
+  }
+
+  auto boolLLVMTy = getTypeConverter()->convertType(op.getOverflow().getType());
+  if (boolLLVMTy != rewriter.getI1Type())
+    overflow = rewriter.create<mlir::LLVM::ZExtOp>(loc, boolLLVMTy, overflow);
+
+  rewriter.replaceOp(op, mlir::ValueRange{result, overflow});
+
+  return mlir::success();
+}
+
+std::string CIRToLLVMBinOpOverflowOpLowering::getLLVMIntrinName(
+    cir::BinOpOverflowKind opKind, bool isSigned, unsigned width) {
+  // The intrinsic name is `@llvm.{s|u}{opKind}.with.overflow.i{width}`
+
+  std::string name = "llvm.";
+
+  if (isSigned)
+    name.push_back('s');
+  else
+    name.push_back('u');
+
+  switch (opKind) {
+  case cir::BinOpOverflowKind::Add:
+    name.append("add.");
+    break;
+  case cir::BinOpOverflowKind::Sub:
+    name.append("sub.");
+    break;
+  case cir::BinOpOverflowKind::Mul:
+    name.append("mul.");
+    break;
+  }
+
+  name.append("with.overflow.i");
+  name.append(std::to_string(width));
+
+  return name;
+}
+
+CIRToLLVMBinOpOverflowOpLowering::EncompassedTypeInfo
+CIRToLLVMBinOpOverflowOpLowering::computeEncompassedTypeWidth(
+    cir::IntType operandTy, cir::IntType resultTy) {
+  auto sign = operandTy.getIsSigned() || resultTy.getIsSigned();
+  auto width = std::max(operandTy.getWidth() + (sign && operandTy.isUnsigned()),
+                        resultTy.getWidth() + (sign && resultTy.isUnsigned()));
+  return {sign, width};
+}
+
 mlir::LogicalResult CIRToLLVMShiftOpLowering::matchAndRewrite(
     cir::ShiftOp op, OpAdaptor adaptor,
     mlir::ConversionPatternRewriter &rewriter) const {
diff --git a/clang/test/CIR/CodeGen/builtins-overflow.cpp b/clang/test/CIR/CodeGen/builtins-overflow.cpp
new file mode 100644
index 0000000000000..8cd227d58686d
--- /dev/null
+++ b/clang/test/CIR/CodeGen/builtins-overflow.cpp
@@ -0,0 +1,364 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck %s --check-prefix=CIR --input-file=%t.cir
+
+bool test_add_overflow_uint_uint_uint(unsigned x, unsigned y, unsigned *res) {
+  return __builtin_add_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z32test_add_overflow_uint_uint_uintjjPj
+//      CIR:   %[[#LHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
+// CIR-NEXT:   %[[#RHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!u32i>>, !cir.ptr<!u32i>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#LHS]], 
%[[#RHS]]) : !u32i, (!u32i, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr<!u32i>
+//      CIR: }
+
+bool test_add_overflow_int_int_int(int x, int y, int *res) {
+  return __builtin_add_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z29test_add_overflow_int_int_intiiPi
+//      CIR:   %[[#LHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT:   %[[#RHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#LHS]], 
%[[#RHS]]) : !s32i, (!s32i, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr<!s32i>
+//      CIR: }
+
+bool test_add_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y, 
_BitInt(31) *res) {
+  return __builtin_add_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local 
@_Z38test_add_overflow_xint31_xint31_xint31DB31_S_PS_
+//      CIR:   %[[#LHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.int<s, 
31>>, !cir.int<s, 31>
+// CIR-NEXT:   %[[#RHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.int<s, 
31>>, !cir.int<s, 31>
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!cir.int<s, 31>>>, !cir.ptr<!cir.int<s, 31>>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#LHS]], 
%[[#RHS]]) : <s, 31>, (<s, 31>, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !cir.int<s, 31>, 
!cir.ptr<!cir.int<s, 31>>
+//      CIR: }
+
+bool test_sub_overflow_uint_uint_uint(unsigned x, unsigned y, unsigned *res) {
+  return __builtin_sub_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z32test_sub_overflow_uint_uint_uintjjPj
+//      CIR:   %[[#LHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
+// CIR-NEXT:   %[[#RHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!u32i>>, !cir.ptr<!u32i>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#LHS]], 
%[[#RHS]]) : !u32i, (!u32i, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr<!u32i>
+//      CIR: }
+
+bool test_sub_overflow_int_int_int(int x, int y, int *res) {
+  return __builtin_sub_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z29test_sub_overflow_int_int_intiiPi
+//      CIR:   %[[#LHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT:   %[[#RHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#LHS]], 
%[[#RHS]]) : !s32i, (!s32i, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr<!s32i>
+//      CIR: }
+
+bool test_sub_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y, 
_BitInt(31) *res) {
+  return __builtin_sub_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local 
@_Z38test_sub_overflow_xint31_xint31_xint31DB31_S_PS_
+//      CIR:   %[[#LHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.int<s, 
31>>, !cir.int<s, 31>
+// CIR-NEXT:   %[[#RHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.int<s, 
31>>, !cir.int<s, 31>
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!cir.int<s, 31>>>, !cir.ptr<!cir.int<s, 31>>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#LHS]], 
%[[#RHS]]) : <s, 31>, (<s, 31>, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !cir.int<s, 31>, 
!cir.ptr<!cir.int<s, 31>>
+//      CIR: }
+
+bool test_mul_overflow_uint_uint_uint(unsigned x, unsigned y, unsigned *res) {
+  return __builtin_mul_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z32test_mul_overflow_uint_uint_uintjjPj
+//      CIR:   %[[#LHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
+// CIR-NEXT:   %[[#RHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!u32i>>, !cir.ptr<!u32i>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#LHS]], 
%[[#RHS]]) : !u32i, (!u32i, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr<!u32i>
+//      CIR: }
+
+bool test_mul_overflow_int_int_int(int x, int y, int *res) {
+  return __builtin_mul_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z29test_mul_overflow_int_int_intiiPi
+//      CIR:   %[[#LHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT:   %[[#RHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#LHS]], 
%[[#RHS]]) : !s32i, (!s32i, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr<!s32i>
+//      CIR: }
+
+bool test_mul_overflow_xint31_xint31_xint31(_BitInt(31) x, _BitInt(31) y, 
_BitInt(31) *res) {
+  return __builtin_mul_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local 
@_Z38test_mul_overflow_xint31_xint31_xint31DB31_S_PS_
+//      CIR:   %[[#LHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.int<s, 
31>>, !cir.int<s, 31>
+// CIR-NEXT:   %[[#RHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!cir.int<s, 
31>>, !cir.int<s, 31>
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!cir.int<s, 31>>>, !cir.ptr<!cir.int<s, 31>>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#LHS]], 
%[[#RHS]]) : <s, 31>, (<s, 31>, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !cir.int<s, 31>, 
!cir.ptr<!cir.int<s, 31>>
+//      CIR: }
+
+bool test_mul_overflow_ulong_ulong_long(unsigned long x, unsigned long y, 
unsigned long *res) {
+  return __builtin_mul_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z34test_mul_overflow_ulong_ulong_longmmPm
+//      CIR:   %[[#LHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT:   %[[#RHS:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!u64i>>, !cir.ptr<!u64i>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#LHS]], 
%[[#RHS]]) : !u64i, (!u64i, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr<!u64i>
+//      CIR: }
+
+bool test_add_overflow_uint_int_int(unsigned x, int y, int *res) {
+  return __builtin_add_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z30test_add_overflow_uint_int_intjiPi
+//      CIR:   %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
+// CIR-NEXT:   %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CIR-NEXT:   %[[#PROM_X:]] = cir.cast integral %[[#X]] : !u32i -> 
!cir.int<s, 33>
+// CIR-NEXT:   %[[#PROM_Y:]] = cir.cast integral %[[#Y]] : !s32i -> 
!cir.int<s, 33>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#PROM_X]], 
%[[#PROM_Y]]) : <s, 33>, (!s32i, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr<!s32i>
+//      CIR: }
+
+bool test_add_overflow_volatile(int x, int y, volatile int *res) {
+  return __builtin_add_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z26test_add_overflow_volatileiiPVi
+//      CIR:   %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT:   %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], 
%[[#Y]]) : !s32i, (!s32i, !cir.bool)
+// CIR-NEXT:   cir.store volatile{{.*}} %[[RES]], %[[#RES_PTR]] : !s32i, 
!cir.ptr<!s32i>
+//      CIR: }
+
+bool test_uadd_overflow(unsigned x, unsigned y, unsigned *res) {
+  return __builtin_uadd_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z18test_uadd_overflowjjPj
+//      CIR:   %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
+// CIR-NEXT:   %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!u32i>>, !cir.ptr<!u32i>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], 
%[[#Y]]) : !u32i, (!u32i, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr<!u32i>
+//      CIR: }
+
+bool test_uaddl_overflow(unsigned long x, unsigned long y, unsigned long *res) 
{
+  return __builtin_uaddl_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z19test_uaddl_overflowmmPm
+//      CIR:   %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT:   %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!u64i>>, !cir.ptr<!u64i>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], 
%[[#Y]]) : !u64i, (!u64i, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr<!u64i>
+//      CIR: }
+
+bool test_uaddll_overflow(unsigned long long x, unsigned long long y, unsigned 
long long *res) {
+  return __builtin_uaddll_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z20test_uaddll_overflowyyPy
+//      CIR:   %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT:   %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!u64i>>, !cir.ptr<!u64i>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], 
%[[#Y]]) : !u64i, (!u64i, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr<!u64i>
+//      CIR: }
+
+bool test_usub_overflow(unsigned x, unsigned y, unsigned *res) {
+  return __builtin_usub_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z18test_usub_overflowjjPj
+//      CIR:   %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
+// CIR-NEXT:   %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!u32i>>, !cir.ptr<!u32i>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], 
%[[#Y]]) : !u32i, (!u32i, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr<!u32i>
+//      CIR: }
+
+bool test_usubl_overflow(unsigned long x, unsigned long y, unsigned long *res) 
{
+  return __builtin_usubl_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z19test_usubl_overflowmmPm
+//      CIR:   %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT:   %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!u64i>>, !cir.ptr<!u64i>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], 
%[[#Y]]) : !u64i, (!u64i, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr<!u64i>
+//      CIR: }
+
+bool test_usubll_overflow(unsigned long long x, unsigned long long y, unsigned 
long long *res) {
+  return __builtin_usubll_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z20test_usubll_overflowyyPy
+//      CIR:   %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT:   %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!u64i>>, !cir.ptr<!u64i>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], 
%[[#Y]]) : !u64i, (!u64i, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr<!u64i>
+//      CIR: }
+
+bool test_umul_overflow(unsigned x, unsigned y, unsigned *res) {
+  return __builtin_umul_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z18test_umul_overflowjjPj
+//      CIR:   %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
+// CIR-NEXT:   %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u32i>, !u32i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!u32i>>, !cir.ptr<!u32i>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], 
%[[#Y]]) : !u32i, (!u32i, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u32i, !cir.ptr<!u32i>
+//      CIR: }
+
+bool test_umull_overflow(unsigned long x, unsigned long y, unsigned long *res) 
{
+  return __builtin_umull_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z19test_umull_overflowmmPm
+//      CIR:   %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT:   %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!u64i>>, !cir.ptr<!u64i>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], 
%[[#Y]]) : !u64i, (!u64i, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr<!u64i>
+//      CIR: }
+
+bool test_umulll_overflow(unsigned long long x, unsigned long long y, unsigned 
long long *res) {
+  return __builtin_umulll_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z20test_umulll_overflowyyPy
+//      CIR:   %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT:   %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!u64i>, !u64i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!u64i>>, !cir.ptr<!u64i>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], 
%[[#Y]]) : !u64i, (!u64i, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !u64i, !cir.ptr<!u64i>
+//      CIR: }
+
+bool test_sadd_overflow(int x, int y, int *res) {
+  return __builtin_sadd_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z18test_sadd_overflowiiPi
+//      CIR:   %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT:   %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], 
%[[#Y]]) : !s32i, (!s32i, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr<!s32i>
+//      CIR: }
+
+bool test_saddl_overflow(long x, long y, long *res) {
+  return __builtin_saddl_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z19test_saddl_overflowllPl
+//      CIR:   %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CIR-NEXT:   %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!s64i>>, !cir.ptr<!s64i>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], 
%[[#Y]]) : !s64i, (!s64i, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr<!s64i>
+//      CIR: }
+
+bool test_saddll_overflow(long long x, long long y, long long *res) {
+  return __builtin_saddll_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z20test_saddll_overflowxxPx
+//      CIR:   %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CIR-NEXT:   %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!s64i>>, !cir.ptr<!s64i>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(add, %[[#X]], 
%[[#Y]]) : !s64i, (!s64i, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr<!s64i>
+//      CIR: }
+
+bool test_ssub_overflow(int x, int y, int *res) {
+  return __builtin_ssub_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z18test_ssub_overflowiiPi
+//      CIR:   %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT:   %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], 
%[[#Y]]) : !s32i, (!s32i, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr<!s32i>
+//      CIR: }
+
+bool test_ssubl_overflow(long x, long y, long *res) {
+  return __builtin_ssubl_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z19test_ssubl_overflowllPl
+//      CIR:   %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CIR-NEXT:   %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!s64i>>, !cir.ptr<!s64i>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], 
%[[#Y]]) : !s64i, (!s64i, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr<!s64i>
+//      CIR: }
+
+bool test_ssubll_overflow(long long x, long long y, long long *res) {
+  return __builtin_ssubll_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z20test_ssubll_overflowxxPx
+//      CIR:   %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CIR-NEXT:   %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!s64i>>, !cir.ptr<!s64i>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(sub, %[[#X]], 
%[[#Y]]) : !s64i, (!s64i, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr<!s64i>
+//      CIR: }
+
+bool test_smul_overflow(int x, int y, int *res) {
+  return __builtin_smul_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z18test_smul_overflowiiPi
+//      CIR:   %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT:   %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s32i>, !s32i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!s32i>>, !cir.ptr<!s32i>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], 
%[[#Y]]) : !s32i, (!s32i, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s32i, !cir.ptr<!s32i>
+//      CIR: }
+
+bool test_smull_overflow(long x, long y, long *res) {
+  return __builtin_smull_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z19test_smull_overflowllPl
+//      CIR:   %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CIR-NEXT:   %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!s64i>>, !cir.ptr<!s64i>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], 
%[[#Y]]) : !s64i, (!s64i, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr<!s64i>
+//      CIR: }
+
+bool test_smulll_overflow(long long x, long long y, long long *res) {
+  return __builtin_smulll_overflow(x, y, res);
+}
+
+//      CIR: cir.func dso_local @_Z20test_smulll_overflowxxPx
+//      CIR:   %[[#X:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CIR-NEXT:   %[[#Y:]] = cir.load{{.*}} %{{.+}} : !cir.ptr<!s64i>, !s64i
+// CIR-NEXT:   %[[#RES_PTR:]] = cir.load{{.*}} %{{.+}} : 
!cir.ptr<!cir.ptr<!s64i>>, !cir.ptr<!s64i>
+// CIR-NEXT:   %[[RES:.+]], %{{.+}} = cir.binop.overflow(mul, %[[#X]], 
%[[#Y]]) : !s64i, (!s64i, !cir.bool)
+// CIR-NEXT:   cir.store{{.*}} %[[RES]], %[[#RES_PTR]] : !s64i, !cir.ptr<!s64i>
+//      CIR: }
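
For readers of the op description and lowering in the patch above, here is a
minimal standalone C++ sketch (not part of the patch) of the semantics that
cir.binop.overflow is documented to have, using __int128 as a stand-in for the
infinitely precise intermediate type for 32-bit operands:

  #include <cstdint>

  struct CheckedResult {
    int32_t result;  // infinite-precision result truncated to the result type
    bool overflow;   // true iff truncation changed the mathematical value
  };

  static CheckedResult checkedAddI32(int32_t lhs, int32_t rhs) {
    __int128 wide = static_cast<__int128>(lhs) + static_cast<__int128>(rhs);
    int32_t truncated = static_cast<int32_t>(wide);
    return {truncated, static_cast<__int128>(truncated) != wide};
  }

  // Example: checkedAddI32(INT32_MAX, 1) yields {INT32_MIN, true}.

The same model explains the mixed-signedness test above: for
__builtin_add_overflow(unsigned, int, int *), both operands are promoted to a
signed 33-bit encompassing type (the narrowest signed type that holds every
32-bit signed and unsigned value), the lowering pattern emits a call to
llvm.sadd.with.overflow.i33, and the truncation of that result back to i32 is
checked for overflow as well.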

>From d03208983759d5fcffc1531b5c0c99aa57275e7f Mon Sep 17 00:00:00 2001
From: adams381 <[email protected]>
Date: Tue, 18 Nov 2025 15:48:57 -0600
Subject: [PATCH 02/12] Update clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp

Co-authored-by: Henrich Lauko <[email protected]>
---
 clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index 19ce15ca5aeeb..c56ae5ac9028b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -93,13 +93,9 @@ EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
   // of the specified types.  Additionally, if the encompassing type is signed,
   // its width must be strictly greater than the width of any unsigned types
   // given.
-  unsigned Width = 0;
-  for (const auto &Type : Types) {
-    unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
-    if (Width < MinWidth) {
-      Width = MinWidth;
-    }
-  }
+unsigned Width = 0;
+for (const auto &Type : Types)
+  Width = std::max(Width, Type.Width + (Signed && !Type.Signed));
 
   return {Width, Signed};
 }

>From 7d73c087a161cfe1f8dd9f03ac2f8e28bce62018 Mon Sep 17 00:00:00 2001
From: adams381 <[email protected]>
Date: Tue, 18 Nov 2025 15:49:26 -0600
Subject: [PATCH 03/12] Update clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp

Co-authored-by: Henrich Lauko <[email protected]>
---
 clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index c56ae5ac9028b..ce53b4039e1f9 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -84,10 +84,7 @@ EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
   assert(Types.size() > 0 && "Empty list of types.");
 
   // If any of the given types is signed, we must return a signed type.
-  bool Signed = false;
-  for (const auto &Type : Types) {
-    Signed |= Type.Signed;
-  }
+bool Signed = llvm::any_of(Types, [](const auto &T) { return T.Signed; });
 
   // The encompassing type must have a width greater than or equal to the width
   // of the specified types.  Additionally, if the encompassing type is signed,

>From 848fbe751dd73e588b95d8b3781df622f7e56c13 Mon Sep 17 00:00:00 2001
From: adams381 <[email protected]>
Date: Tue, 18 Nov 2025 15:57:21 -0600
Subject: [PATCH 04/12] Update clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp

Co-authored-by: Andy Kaylor <[email protected]>
---
 clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index ce53b4039e1f9..6caad2c15fb86 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -67,7 +67,7 @@ struct WidthAndSignedness {
 
 static WidthAndSignedness
 getIntegerWidthAndSignedness(const clang::ASTContext &astContext,
-                             const clang::QualType Type) {
+                             const clang::QualType type) {
   assert(Type->isIntegerType() && "Given type is not an integer.");
   unsigned Width = Type->isBooleanType()  ? 1
                    : Type->isBitIntType() ? astContext.getIntWidth(Type)

>From d42d9281957c4533063bcd0eb286cf654b2fa2f0 Mon Sep 17 00:00:00 2001
From: adams381 <[email protected]>
Date: Tue, 18 Nov 2025 15:57:36 -0600
Subject: [PATCH 05/12] Update clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp

Co-authored-by: Andy Kaylor <[email protected]>
---
 clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index 6caad2c15fb86..be944d0bd9ce0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -72,7 +72,7 @@ getIntegerWidthAndSignedness(const clang::ASTContext &astContext,
   unsigned Width = Type->isBooleanType()  ? 1
                    : Type->isBitIntType() ? astContext.getIntWidth(Type)
                                           : astContext.getTypeInfo(Type).Width;
-  bool Signed = Type->isSignedIntegerType();
+  bool signed = Type->isSignedIntegerType();
   return {Width, Signed};
 }
 

>From e2b36299c550f3b25fd84420f099b3ba436083e5 Mon Sep 17 00:00:00 2001
From: adams381 <[email protected]>
Date: Tue, 18 Nov 2025 18:06:14 -0600
Subject: [PATCH 06/12] Update clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp

Co-authored-by: Andy Kaylor <[email protected]>
---
 clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index be944d0bd9ce0..cd0fd5177297b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -60,8 +60,8 @@ static RValue emitBuiltinBitOp(CIRGenFunction &cgf, const CallExpr *e,
 
 namespace {
 struct WidthAndSignedness {
-  unsigned Width;
-  bool Signed;
+  unsigned width;
+  bool signed;
 };
 } // namespace
 

>From f77a61d165ea31e989808d66d5fb1ba1ec523776 Mon Sep 17 00:00:00 2001
From: adams381 <[email protected]>
Date: Tue, 18 Nov 2025 18:06:37 -0600
Subject: [PATCH 07/12] Update clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp

Co-authored-by: Andy Kaylor <[email protected]>
---
 clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index cd0fd5177297b..f965450fc7d89 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -69,7 +69,7 @@ static WidthAndSignedness
 getIntegerWidthAndSignedness(const clang::ASTContext &astContext,
                              const clang::QualType type) {
   assert(Type->isIntegerType() && "Given type is not an integer.");
-  unsigned Width = Type->isBooleanType()  ? 1
+  unsigned width = type->isBooleanType()  ? 1
                    : Type->isBitIntType() ? astContext.getIntWidth(Type)
                                           : astContext.getTypeInfo(Type).Width;
   bool signed = Type->isSignedIntegerType();

>From 781e7bb0ac38bd974d7ae9b6137d2b995e989762 Mon Sep 17 00:00:00 2001
From: adams381 <[email protected]>
Date: Tue, 18 Nov 2025 18:07:32 -0600
Subject: [PATCH 08/12] Update
 clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp

Co-authored-by: Andy Kaylor <[email protected]>
---
 clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index c81f7cc657137..f1cab4c4ebbbe 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -2534,7 +2534,7 @@ mlir::LogicalResult CIRToLLVMBinOpOverflowOpLowering::matchAndRewrite(
   auto intrinRetTy = mlir::LLVM::LLVMStructType::getLiteral(
       rewriter.getContext(), {encompassedLLVMTy, overflowLLVMTy});
 
-  auto callLLVMIntrinOp = rewriter.create<mlir::LLVM::CallIntrinsicOp>(
+  auto callLLVMIntrinOp = mlir::LLVM::CallIntrinsicOp::create(rewriter,
       loc, intrinRetTy, intrinNameAttr, mlir::ValueRange{lhs, rhs});
   auto intrinRet = callLLVMIntrinOp.getResult(0);
 

>From 3b01b5a20b7961fbb485f92ebaef8c39482a4cf9 Mon Sep 17 00:00:00 2001
From: Adam Smith <[email protected]>
Date: Tue, 18 Nov 2025 16:05:17 -0800
Subject: [PATCH 09/12] [CIR] Remove createBinOpOverflowOp helper function

Remove the BinOpOverflowResults struct and createBinOpOverflowOp helper
function from CIRBaseBuilder. Instead, call cir::BinOpOverflowOp::create
directly and use getResult() and getOverflow() on the returned operation.

This simplifies the API and makes it more natural to use, as suggested
by reviewer feedback.
---
 .../clang/CIR/Dialect/Builder/CIRBaseBuilder.h     | 14 --------------
 1 file changed, 14 deletions(-)

diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
index 6c1951714ba1f..3288f5b12c77e 100644
--- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
+++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
@@ -408,20 +408,6 @@ class CIRBaseBuilderTy : public mlir::OpBuilder {
                            callee.getFunctionType().getReturnType(), operands);
   }
 
-  struct BinOpOverflowResults {
-    mlir::Value result;
-    mlir::Value overflow;
-  };
-
-  BinOpOverflowResults createBinOpOverflowOp(mlir::Location loc,
-                                             cir::IntType resultTy,
-                                             cir::BinOpOverflowKind kind,
-                                             mlir::Value lhs, mlir::Value rhs) {
-    auto op =
-        cir::BinOpOverflowOp::create(*this, loc, resultTy, kind, lhs, rhs);
-    return {op.getResult(), op.getOverflow()};
-  }
-
   
//===--------------------------------------------------------------------===//
   // Cast/Conversion Operators
   
//===--------------------------------------------------------------------===//
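
With the helper removed, the call sites are expected to build the op directly
and read both results off of it. A rough sketch of the resulting pattern,
reconstructed from the removed helper's body rather than quoted from the
updated CIRGenBuiltin.cpp:

  // loc, resultTy, kind, lhs, and rhs are the values the helper used to take.
  auto op = cir::BinOpOverflowOp::create(builder, loc, resultTy, kind, lhs, rhs);
  mlir::Value result = op.getResult();      // truncated arithmetic result
  mlir::Value overflow = op.getOverflow();  // !cir.bool overflow flag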

>From 05600b8d9ee33478f707de4fec8e18aff8ca6a1c Mon Sep 17 00:00:00 2001
From: Adam Smith <[email protected]>
Date: Tue, 18 Nov 2025 16:23:58 -0800
Subject: [PATCH 10/12] [CIR] Fix style and deprecated API in overflow builtins
 lowering

Replace auto with explicit types and use lowerCamelCase.
Replace deprecated rewriter.create with Op::create.
---
 .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 89 ++++++++++---------
 1 file changed, 48 insertions(+), 41 deletions(-)

diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index f1cab4c4ebbbe..1896b89f6758f 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -2506,71 +2506,78 @@ mlir::LogicalResult CIRToLLVMCmpOpLowering::matchAndRewrite(
 mlir::LogicalResult CIRToLLVMBinOpOverflowOpLowering::matchAndRewrite(
     cir::BinOpOverflowOp op, OpAdaptor adaptor,
     mlir::ConversionPatternRewriter &rewriter) const {
-  auto loc = op.getLoc();
-  auto arithKind = op.getKind();
-  auto operandTy = op.getLhs().getType();
-  auto resultTy = op.getResult().getType();
+  mlir::Location loc = op.getLoc();
+  cir::BinOpOverflowKind arithKind = op.getKind();
+  cir::IntType operandTy = op.getLhs().getType();
+  cir::IntType resultTy = op.getResult().getType();
 
-  auto encompassedTyInfo = computeEncompassedTypeWidth(operandTy, resultTy);
-  auto encompassedLLVMTy = rewriter.getIntegerType(encompassedTyInfo.width);
+  EncompassedTypeInfo encompassedTyInfo =
+      computeEncompassedTypeWidth(operandTy, resultTy);
+  mlir::IntegerType encompassedLLVMTy =
+      rewriter.getIntegerType(encompassedTyInfo.width);
 
-  auto lhs = adaptor.getLhs();
-  auto rhs = adaptor.getRhs();
+  mlir::Value lhs = adaptor.getLhs();
+  mlir::Value rhs = adaptor.getRhs();
   if (operandTy.getWidth() < encompassedTyInfo.width) {
     if (operandTy.isSigned()) {
-      lhs = rewriter.create<mlir::LLVM::SExtOp>(loc, encompassedLLVMTy, lhs);
-      rhs = rewriter.create<mlir::LLVM::SExtOp>(loc, encompassedLLVMTy, rhs);
+      lhs = mlir::LLVM::SExtOp::create(rewriter, loc, encompassedLLVMTy, lhs);
+      rhs = mlir::LLVM::SExtOp::create(rewriter, loc, encompassedLLVMTy, rhs);
     } else {
-      lhs = rewriter.create<mlir::LLVM::ZExtOp>(loc, encompassedLLVMTy, lhs);
-      rhs = rewriter.create<mlir::LLVM::ZExtOp>(loc, encompassedLLVMTy, rhs);
+      lhs = mlir::LLVM::ZExtOp::create(rewriter, loc, encompassedLLVMTy, lhs);
+      rhs = mlir::LLVM::ZExtOp::create(rewriter, loc, encompassedLLVMTy, rhs);
     }
   }
 
-  auto intrinName = getLLVMIntrinName(arithKind, encompassedTyInfo.sign,
-                                      encompassedTyInfo.width);
-  auto intrinNameAttr = mlir::StringAttr::get(op.getContext(), intrinName);
+  std::string intrinName = getLLVMIntrinName(
+      arithKind, encompassedTyInfo.sign, encompassedTyInfo.width);
+  mlir::StringAttr intrinNameAttr =
+      mlir::StringAttr::get(op.getContext(), intrinName);
 
-  auto overflowLLVMTy = rewriter.getI1Type();
-  auto intrinRetTy = mlir::LLVM::LLVMStructType::getLiteral(
-      rewriter.getContext(), {encompassedLLVMTy, overflowLLVMTy});
+  mlir::IntegerType overflowLLVMTy = rewriter.getI1Type();
+  mlir::LLVM::LLVMStructType intrinRetTy =
+      mlir::LLVM::LLVMStructType::getLiteral(
+          rewriter.getContext(), {encompassedLLVMTy, overflowLLVMTy});
 
-  auto callLLVMIntrinOp = mlir::LLVM::CallIntrinsicOp::create(rewriter,
-      loc, intrinRetTy, intrinNameAttr, mlir::ValueRange{lhs, rhs});
-  auto intrinRet = callLLVMIntrinOp.getResult(0);
+  mlir::LLVM::CallIntrinsicOp callLLVMIntrinOp =
+      mlir::LLVM::CallIntrinsicOp::create(rewriter, loc, intrinRetTy,
+                                          intrinNameAttr, mlir::ValueRange{lhs, rhs});
+  mlir::Value intrinRet = callLLVMIntrinOp.getResult(0);
 
-  auto result = rewriter
-                    .create<mlir::LLVM::ExtractValueOp>(loc, intrinRet,
-                                                        ArrayRef<int64_t>{0})
-                    .getResult();
-  auto overflow = rewriter
-                      .create<mlir::LLVM::ExtractValueOp>(loc, intrinRet,
-                                                          ArrayRef<int64_t>{1})
-                      .getResult();
+  mlir::Value result =
+      mlir::LLVM::ExtractValueOp::create(rewriter, loc, intrinRet,
+                                         ArrayRef<int64_t>{0})
+          .getResult();
+  mlir::Value overflow =
+      mlir::LLVM::ExtractValueOp::create(rewriter, loc, intrinRet,
+                                         ArrayRef<int64_t>{1})
+          .getResult();
 
   if (resultTy.getWidth() < encompassedTyInfo.width) {
-    auto resultLLVMTy = getTypeConverter()->convertType(resultTy);
-    auto truncResult =
-        rewriter.create<mlir::LLVM::TruncOp>(loc, resultLLVMTy, result);
+    mlir::Type resultLLVMTy = getTypeConverter()->convertType(resultTy);
+    mlir::Value truncResult =
+        mlir::LLVM::TruncOp::create(rewriter, loc, resultLLVMTy, result);
 
     // Extend the truncated result back to the encompassing type to check for
     // any overflows during the truncation.
     mlir::Value truncResultExt;
     if (resultTy.isSigned())
-      truncResultExt = rewriter.create<mlir::LLVM::SExtOp>(
-          loc, encompassedLLVMTy, truncResult);
+      truncResultExt = mlir::LLVM::SExtOp::create(rewriter, loc,
+                                                  encompassedLLVMTy, truncResult);
     else
-      truncResultExt = rewriter.create<mlir::LLVM::ZExtOp>(
-          loc, encompassedLLVMTy, truncResult);
-    auto truncOverflow = rewriter.create<mlir::LLVM::ICmpOp>(
-        loc, mlir::LLVM::ICmpPredicate::ne, truncResultExt, result);
+      truncResultExt = mlir::LLVM::ZExtOp::create(rewriter, loc,
+                                                  encompassedLLVMTy, truncResult);
+    mlir::Value truncOverflow =
+        mlir::LLVM::ICmpOp::create(rewriter, loc, mlir::LLVM::ICmpPredicate::ne,
+                                   truncResultExt, result);
 
     result = truncResult;
-    overflow = rewriter.create<mlir::LLVM::OrOp>(loc, overflow, truncOverflow);
+    overflow = mlir::LLVM::OrOp::create(rewriter, loc, overflow, truncOverflow);
   }
 
-  auto boolLLVMTy = getTypeConverter()->convertType(op.getOverflow().getType());
+  mlir::Type boolLLVMTy =
+      getTypeConverter()->convertType(op.getOverflow().getType());
   if (boolLLVMTy != rewriter.getI1Type())
-    overflow = rewriter.create<mlir::LLVM::ZExtOp>(loc, boolLLVMTy, overflow);
+    overflow = mlir::LLVM::ZExtOp::create(rewriter, loc, boolLLVMTy, overflow);
 
   rewriter.replaceOp(op, mlir::ValueRange{result, overflow});
 

>From 5c00c3de0958f57ce05dd789d11ecc68fb62ed79 Mon Sep 17 00:00:00 2001
From: Adam Smith <[email protected]>
Date: Tue, 18 Nov 2025 16:30:51 -0800
Subject: [PATCH 11/12] [CIR] Fix style and remove helper function in overflow
 builtins

- Rename 'signed' field to 'isSigned' (signed is a keyword)
- Use lowerCamelCase for all variables and parameters
- Replace createBinOpOverflowOp helper with direct BinOpOverflowOp::create calls
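For the last bullet, a hedged before/after sketch (argument names mirror the call sites in CIRGenBuiltin.cpp; the free-standing helper below is illustrative only):

  #include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h"

  static void emitCheckedArith(cir::CIRBaseBuilderTy &builder,
                               mlir::Location loc, cir::IntType resultTy,
                               cir::BinOpOverflowKind kind, mlir::Value lhs,
                               mlir::Value rhs) {
    // Before: results came back through the BinOpOverflowResults helper struct.
    //   auto arithResult =
    //       builder.createBinOpOverflowOp(loc, resultTy, kind, lhs, rhs);
    //   ... arithResult.result / arithResult.overflow ...
    // After: the op is created directly and its accessors are used.
    cir::BinOpOverflowOp arithOp =
        cir::BinOpOverflowOp::create(builder, loc, resultTy, kind, lhs, rhs);
    (void)arithOp.getResult();   // arithmetic result, stored via the result ptr
    (void)arithOp.getOverflow(); // overflow flag, returned as the builtin value
  }
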
---
 clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 44 ++++++++++++-------------
 1 file changed, 22 insertions(+), 22 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp 
b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index f965450fc7d89..8ac6bc2bdb80d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -61,40 +61,40 @@ static RValue emitBuiltinBitOp(CIRGenFunction &cgf, const 
CallExpr *e,
 namespace {
 struct WidthAndSignedness {
   unsigned width;
-  bool signed;
+  bool isSigned;
 };
 } // namespace
 
 static WidthAndSignedness
 getIntegerWidthAndSignedness(const clang::ASTContext &astContext,
                              const clang::QualType type) {
-  assert(Type->isIntegerType() && "Given type is not an integer.");
+  assert(type->isIntegerType() && "Given type is not an integer.");
   unsigned width = type->isBooleanType()  ? 1
-                   : Type->isBitIntType() ? astContext.getIntWidth(Type)
-                                          : astContext.getTypeInfo(Type).Width;
-  bool signed = Type->isSignedIntegerType();
-  return {Width, Signed};
+                   : type->isBitIntType() ? astContext.getIntWidth(type)
+                                          : astContext.getTypeInfo(type).Width;
+  bool isSigned = type->isSignedIntegerType();
+  return {width, isSigned};
 }
 
 // Given one or more integer types, this function produces an integer type that
 // encompasses them: any value in one of the given types could be expressed in
 // the encompassing type.
 static struct WidthAndSignedness
-EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
-  assert(Types.size() > 0 && "Empty list of types.");
+EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> types) {
+  assert(types.size() > 0 && "Empty list of types.");
 
   // If any of the given types is signed, we must return a signed type.
-bool Signed = llvm::any_of(Types, [](const auto &T) { return T.Signed; });
+  bool isSigned = llvm::any_of(types, [](const auto &t) { return t.isSigned; 
});
 
   // The encompassing type must have a width greater than or equal to the width
   // of the specified types.  Additionally, if the encompassing type is signed,
   // its width must be strictly greater than the width of any unsigned types
   // given.
-unsigned Width = 0;
-for (const auto &Type : Types)
-  Width = std::max(Width, Type.Width + (Signed && !Type.Signed));
+  unsigned width = 0;
+  for (const auto &type : types)
+    width = std::max(width, type.width + (isSigned && !type.isSigned));
 
-  return {Width, Signed};
+  return {width, isSigned};
 }
 
 RValue CIRGenFunction::emitRotate(const CallExpr *e, bool isRotateLeft) {
@@ -554,7 +554,7 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl 
&gd, unsigned builtinID,
         EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
 
     auto EncompassingCIRTy = cir::IntType::get(
-        &getMLIRContext(), EncompassingInfo.Width, EncompassingInfo.Signed);
+        &getMLIRContext(), EncompassingInfo.width, EncompassingInfo.isSigned);
     auto ResultCIRTy = mlir::cast<cir::IntType>(cgm.convertType(ResultQTy));
 
     mlir::Value Left = emitScalarExpr(LeftArg);
@@ -586,8 +586,8 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl 
&gd, unsigned builtinID,
     }
 
     auto Loc = getLoc(e->getSourceRange());
-    auto ArithResult =
-        builder.createBinOpOverflowOp(Loc, ResultCIRTy, OpKind, Left, Right);
+    cir::BinOpOverflowOp ArithOp =
+        cir::BinOpOverflowOp::create(builder, Loc, ResultCIRTy, OpKind, Left, 
Right);
 
     // Here is a slight difference from the original clang CodeGen:
     //   - In the original clang CodeGen, the checked arithmetic result is
@@ -601,10 +601,10 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl 
&gd, unsigned builtinID,
     // Finally, store the result using the pointer.
     bool isVolatile =
         ResultArg->getType()->getPointeeType().isVolatileQualified();
-    builder.createStore(Loc, emitToMemory(ArithResult.result, ResultQTy),
+    builder.createStore(Loc, emitToMemory(ArithOp.getResult(), ResultQTy),
                         ResultPtr, isVolatile);
 
-    return RValue::get(ArithResult.overflow);
+    return RValue::get(ArithOp.getOverflow());
   }
 
   case Builtin::BI__builtin_uadd_overflow:
@@ -668,15 +668,15 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl 
&gd, unsigned builtinID,
     auto ResultCIRTy = mlir::cast<cir::IntType>(cgm.convertType(ResultQTy));
 
     auto Loc = getLoc(e->getSourceRange());
-    auto ArithResult =
-        builder.createBinOpOverflowOp(Loc, ResultCIRTy, ArithKind, X, Y);
+    cir::BinOpOverflowOp ArithOp =
+        cir::BinOpOverflowOp::create(builder, Loc, ResultCIRTy, ArithKind, X, 
Y);
 
     bool isVolatile =
         ResultArg->getType()->getPointeeType().isVolatileQualified();
-    builder.createStore(Loc, emitToMemory(ArithResult.result, ResultQTy),
+    builder.createStore(Loc, emitToMemory(ArithOp.getResult(), ResultQTy),
                         ResultPtr, isVolatile);
 
-    return RValue::get(ArithResult.overflow);
+    return RValue::get(ArithOp.getOverflow());
   }
   }
 

>From 11b1a110c0f888f91fa17dabf33714d300e69dc3 Mon Sep 17 00:00:00 2001
From: Adam Smith <[email protected]>
Date: Tue, 18 Nov 2025 20:06:19 -0800
Subject: [PATCH 12/12] [CIR] Add support for thread-local storage (TLS)

This commit adds full support for thread-local storage variables in ClangIR,
including code generation, lowering to LLVM IR, and comprehensive testing.

Changes include:
- Added CIR_TLSModel enum with 4 TLS models (GeneralDynamic, LocalDynamic,
  InitialExec, LocalExec) to CIROps.td
- Extended GlobalOp with optional tls_model attribute
- Extended GetGlobalOp with thread_local unit attribute
- Added verification to ensure thread_local GetGlobalOp references globals
  with tls_model set
- Implemented GetDefaultCIRTLSModel() and setTLSMode() in CIRGenModule
- Updated getAddrOfGlobalVar() to handle TLS access
- Removed MissingFeatures assertions for TLS operations
- Added lowering of GetGlobalOp with TLS to llvm.threadlocal.address intrinsic
- Added lowering of GlobalOp with tls_model to LLVM thread_local globals
- Added comprehensive test with CIR, LLVM, and OGCG checks

Known limitations (matching incubator):
- Static local TLS variables not yet implemented
- TLS_Dynamic with wrapper functions not yet implemented

This fixes issue #153270
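
For the GetGlobalOp lowering, a minimal sketch of the access path (it mirrors the LowerToLLVM.cpp hunk further down; the free-standing helper and its name are illustrative only):

  #include "clang/CIR/Dialect/IR/CIRDialect.h"
  #include "mlir/Dialect/LLVMIR/LLVMDialect.h"
  #include "mlir/Transforms/DialectConversion.h"

  static mlir::Value lowerTlsGetGlobal(mlir::ConversionPatternRewriter &rewriter,
                                       cir::GetGlobalOp op, mlir::Type type) {
    // Take the address of the lowered LLVM global.
    mlir::Operation *newop = mlir::LLVM::AddressOfOp::create(
        rewriter, op.getLoc(), type, op.getName());
    // For a thread_local access, route the address through the dedicated op,
    // which becomes a call to the llvm.threadlocal.address intrinsic.
    if (op.getTls())
      newop = mlir::LLVM::ThreadlocalAddressOp::create(
          rewriter, op.getLoc(), type, newop->getResult(0));
    return newop->getResult(0);
  }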
---
 .../CIR/Dialect/Builder/CIRBaseBuilder.h      | 13 +++---
 clang/include/clang/CIR/Dialect/IR/CIROps.td  | 17 ++++++-
 clang/lib/CIR/CodeGen/CIRGenDecl.cpp          |  3 +-
 clang/lib/CIR/CodeGen/CIRGenExpr.cpp          |  7 +--
 clang/lib/CIR/CodeGen/CIRGenModule.cpp        | 45 ++++++++++++++++---
 clang/lib/CIR/CodeGen/CIRGenModule.h          |  7 +++
 clang/lib/CIR/Dialect/IR/CIRDialect.cpp       |  5 ++-
 .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 12 ++---
 clang/test/CIR/CodeGen/tls.c                  | 29 ++++++++++++
 9 files changed, 116 insertions(+), 22 deletions(-)
 create mode 100644 clang/test/CIR/CodeGen/tls.c

diff --git a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h 
b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
index 3288f5b12c77e..7bafa42df2739 100644
--- a/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
+++ b/clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
@@ -300,14 +300,17 @@ class CIRBaseBuilderTy : public mlir::OpBuilder {
     return cir::GlobalViewAttr::get(type, symbol, indices);
   }
 
-  mlir::Value createGetGlobal(mlir::Location loc, cir::GlobalOp global) {
+  mlir::Value createGetGlobal(mlir::Location loc, cir::GlobalOp global,
+                              bool threadLocal = false) {
     assert(!cir::MissingFeatures::addressSpace());
-    return cir::GetGlobalOp::create(
-        *this, loc, getPointerTo(global.getSymType()), global.getSymName());
+    auto getGlobalOp = cir::GetGlobalOp::create(
+        *this, loc, getPointerTo(global.getSymType()), global.getSymNameAttr(),
+        threadLocal);
+    return getGlobalOp.getAddr();
   }
 
-  mlir::Value createGetGlobal(cir::GlobalOp global) {
-    return createGetGlobal(global.getLoc(), global);
+  mlir::Value createGetGlobal(cir::GlobalOp global, bool threadLocal = false) {
+    return createGetGlobal(global.getLoc(), global, threadLocal);
   }
 
   /// Create a copy with inferred length.
diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td 
b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 328880d6f3581..1200fc049b087 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -1958,6 +1958,13 @@ def CIR_GlobalLinkageKind : CIR_I32EnumAttr<
 // properties of a global variable will be added over time as more of ClangIR
 // is upstreamed.
 
+def CIR_TLSModel : CIR_I32EnumAttr<"TLS_Model", "TLS model", [
+  I32EnumAttrCase<"GeneralDynamic", 0, "tls_dyn">,
+  I32EnumAttrCase<"LocalDynamic", 1, "tls_local_dyn">,
+  I32EnumAttrCase<"InitialExec", 2, "tls_init_exec">,
+  I32EnumAttrCase<"LocalExec", 3, "tls_local_exec">
+]>;
+
 def CIR_GlobalOp : CIR_Op<"global", [
   DeclareOpInterfaceMethods<RegionBranchOpInterface>,
   DeclareOpInterfaceMethods<CIRGlobalValueInterface>,
@@ -1986,6 +1993,7 @@ def CIR_GlobalOp : CIR_Op<"global", [
                        OptionalAttr<StrAttr>:$sym_visibility,
                        TypeAttr:$sym_type,
                        CIR_GlobalLinkageKind:$linkage,
+                       OptionalAttr<CIR_TLSModel>:$tls_model,
                        OptionalAttr<AnyAttr>:$initial_value,
                        UnitAttr:$comdat,
                        UnitAttr:$constant,
@@ -2001,6 +2009,7 @@ def CIR_GlobalOp : CIR_Op<"global", [
     (`constant` $constant^)?
     $linkage
     (`comdat` $comdat^)?
+    ($tls_model^)?
     (`dso_local` $dso_local^)?
     $sym_name
     custom<GlobalOpTypeAndInitialValue>($sym_type, $initial_value,
@@ -2064,16 +2073,22 @@ def CIR_GetGlobalOp : CIR_Op<"get_global", [
     undefined. The resulting type must always be a `!cir.ptr<...>` type with 
the
     same address space as the global variable.
 
+    Addresses of thread local globals can only be retrieved if this operation
+    is marked `thread_local`, which indicates the address isn't constant.
+
     Example:
     ```mlir
     %x = cir.get_global @gv : !cir.ptr<i32>
+    ...
+    %y = cir.get_global thread_local @tls_gv : !cir.ptr<i32>
     ```
   }];
 
-  let arguments = (ins FlatSymbolRefAttr:$name);
+  let arguments = (ins FlatSymbolRefAttr:$name, UnitAttr:$tls);
   let results = (outs Res<CIR_PointerType, "", []>:$addr);
 
   let assemblyFormat = [{
+    (`thread_local` $tls^)?
     $name `:` qualified(type($addr)) attr-dict
   }];
 }
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp 
b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index aeea0efeb77c3..27e9c7bc53fc4 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -361,7 +361,8 @@ CIRGenModule::getOrCreateStaticVarDecl(const VarDecl &d,
   if (supportsCOMDAT() && gv.isWeakForLinker())
     gv.setComdat(true);
 
-  assert(!cir::MissingFeatures::opGlobalThreadLocal());
+  if (d.getTLSKind())
+    llvm_unreachable("TLS mode is NYI");
 
   setGVProperties(gv, &d);
 
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp 
b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 5ccb431e626ae..e2869bcba3b79 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -277,7 +277,6 @@ static LValue emitGlobalVarDeclLValue(CIRGenFunction &cgf, 
const Expr *e,
   QualType t = e->getType();
 
   // If it's thread_local, emit a call to its wrapper function instead.
-  assert(!cir::MissingFeatures::opGlobalThreadLocal());
   if (vd->getTLSKind() == VarDecl::TLS_Dynamic)
     cgf.cgm.errorNYI(e->getSourceRange(),
                      "emitGlobalVarDeclLValue: thread_local variable");
@@ -312,7 +311,8 @@ static LValue emitGlobalVarDeclLValue(CIRGenFunction &cgf, 
const Expr *e,
 void CIRGenFunction::emitStoreOfScalar(mlir::Value value, Address addr,
                                        bool isVolatile, QualType ty,
                                        bool isInit, bool isNontemporal) {
-  assert(!cir::MissingFeatures::opLoadStoreThreadLocal());
+  // Traditional LLVM codegen handles thread-local variables separately; CIR
+  // handles them as part of getAddrOfGlobalVar (GetGlobalOp).
 
   if (const auto *clangVecTy = ty->getAs<clang::VectorType>()) {
     // Boolean vectors use `iN` as storage type.
@@ -556,7 +556,8 @@ void CIRGenFunction::emitStoreOfScalar(mlir::Value value, 
LValue lvalue,
 mlir::Value CIRGenFunction::emitLoadOfScalar(Address addr, bool isVolatile,
                                              QualType ty, SourceLocation loc,
                                              LValueBaseInfo baseInfo) {
-  assert(!cir::MissingFeatures::opLoadStoreThreadLocal());
+  // Traditional LLVM codegen handles thread-local variables separately; CIR
+  // handles them as part of getAddrOfGlobalVar (GetGlobalOp).
   mlir::Type eltTy = addr.getElementType();
 
   if (const auto *clangVecTy = ty->getAs<clang::VectorType>()) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp 
b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index 9f9b2db4771df..8426d5dae110f 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -679,8 +679,11 @@ CIRGenModule::getOrCreateCIRGlobal(StringRef mangledName, 
mlir::Type ty,
 
     setLinkageForGV(gv, d);
 
-    if (d->getTLSKind())
-      errorNYI(d->getSourceRange(), "thread local global variable");
+    if (d->getTLSKind()) {
+      if (d->getTLSKind() == VarDecl::TLS_Dynamic)
+        errorNYI(d->getSourceRange(), "TLS dynamic");
+      setTLSMode(gv, *d);
+    }
 
     setGVProperties(gv, d);
 
@@ -735,12 +738,13 @@ mlir::Value CIRGenModule::getAddrOfGlobalVar(const 
VarDecl *d, mlir::Type ty,
   if (!ty)
     ty = getTypes().convertTypeForMem(astTy);
 
-  assert(!cir::MissingFeatures::opGlobalThreadLocal());
-
+  bool tlsAccess = d->getTLSKind() != VarDecl::TLS_None;
   cir::GlobalOp g = getOrCreateCIRGlobal(d, ty, isForDefinition);
   mlir::Type ptrTy = builder.getPointerTo(g.getSymType());
-  return cir::GetGlobalOp::create(builder, getLoc(d->getSourceRange()), ptrTy,
-                                  g.getSymName());
+  auto getGlobalOp = cir::GetGlobalOp::create(
+      builder, getLoc(d->getSourceRange()), ptrTy, g.getSymNameAttr(),
+      tlsAccess);
+  return getGlobalOp.getAddr();
 }
 
 cir::GlobalViewAttr CIRGenModule::getAddrOfGlobalVarAttr(const VarDecl *d) {
@@ -1898,6 +1902,35 @@ void CIRGenModule::setGVPropertiesAux(mlir::Operation 
*op,
   assert(!cir::MissingFeatures::opGlobalPartition());
 }
 
+cir::TLS_Model CIRGenModule::GetDefaultCIRTLSModel() const {
+  switch (getCodeGenOpts().getDefaultTLSModel()) {
+  case CodeGenOptions::GeneralDynamicTLSModel:
+    return cir::TLS_Model::GeneralDynamic;
+  case CodeGenOptions::LocalDynamicTLSModel:
+    return cir::TLS_Model::LocalDynamic;
+  case CodeGenOptions::InitialExecTLSModel:
+    return cir::TLS_Model::InitialExec;
+  case CodeGenOptions::LocalExecTLSModel:
+    return cir::TLS_Model::LocalExec;
+  }
+  llvm_unreachable("Invalid TLS model!");
+}
+
+void CIRGenModule::setTLSMode(mlir::Operation *op, const VarDecl &d) const {
+  assert(d.getTLSKind() && "setting TLS mode on non-TLS var!");
+
+  auto tlm = GetDefaultCIRTLSModel();
+
+  // Override the TLS model if it is explicitly specified.
+  if (d.getAttr<TLSModelAttr>()) {
+    llvm_unreachable("NYI");
+  }
+
+  auto global = dyn_cast<cir::GlobalOp>(op);
+  assert(global && "NYI for other operations");
+  global.setTlsModel(tlm);
+}
+
 void CIRGenModule::setFunctionAttributes(GlobalDecl globalDecl,
                                          cir::FuncOp func,
                                          bool isIncompleteFunction,
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h 
b/clang/lib/CIR/CodeGen/CIRGenModule.h
index 186913d1bac9d..44b810d1fffda 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.h
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.h
@@ -425,6 +425,13 @@ class CIRGenModule : public CIRGenTypeCache {
   void setGVProperties(mlir::Operation *op, const NamedDecl *d) const;
   void setGVPropertiesAux(mlir::Operation *op, const NamedDecl *d) const;
 
+  /// Set TLS mode for the given operation based on the given variable
+  /// declaration.
+  void setTLSMode(mlir::Operation *op, const VarDecl &d) const;
+
+  /// Get TLS mode from CodeGenOptions.
+  cir::TLS_Model GetDefaultCIRTLSModel() const;
+
   /// Set function attributes for a function declaration.
   void setFunctionAttributes(GlobalDecl gd, cir::FuncOp f,
                              bool isIncompleteFunction, bool isThunk);
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp 
b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index 7ba03ce40140c..8cb52336e35d2 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -1527,7 +1527,10 @@ cir::GetGlobalOp::verifySymbolUses(SymbolTableCollection 
&symbolTable) {
   if (auto g = dyn_cast<GlobalOp>(op)) {
     symTy = g.getSymType();
     assert(!cir::MissingFeatures::addressSpace());
-    assert(!cir::MissingFeatures::opGlobalThreadLocal());
+    // For a thread_local access, verify that the referenced global is
+    // marked with a TLS model.
+    if (getTls() && !g.getTlsModel())
+      return emitOpError("access to global not marked thread local");
   } else if (auto f = dyn_cast<FuncOp>(op)) {
     symTy = f.getFunctionType();
   } else {
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp 
b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 1896b89f6758f..f1349b0963b82 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -1911,7 +1911,11 @@ mlir::LogicalResult 
CIRToLLVMGetGlobalOpLowering::matchAndRewrite(
   mlir::Operation *newop = mlir::LLVM::AddressOfOp::create(
       rewriter, op.getLoc(), type, op.getName());
 
-  assert(!cir::MissingFeatures::opGlobalThreadLocal());
+  if (op.getTls()) {
+    // Handle access to TLS via intrinsic.
+    newop = mlir::LLVM::ThreadlocalAddressOp::create(rewriter, op.getLoc(),
+                                                     type, 
newop->getResult(0));
+  }
 
   rewriter.replaceOp(op, newop);
   return mlir::success();
@@ -1933,8 +1937,7 @@ void 
CIRToLLVMGlobalOpLowering::setupRegionInitializedLLVMGlobalOp(
   assert(!cir::MissingFeatures::addressSpace());
   const unsigned addrSpace = 0;
   const bool isDsoLocal = op.getDsoLocal();
-  assert(!cir::MissingFeatures::opGlobalThreadLocal());
-  const bool isThreadLocal = false;
+  const bool isThreadLocal = (bool)op.getTlsModelAttr();
   const uint64_t alignment = op.getAlignment().value_or(0);
   const mlir::LLVM::Linkage linkage = convertLinkage(op.getLinkage());
   const StringRef symbol = op.getSymName();
@@ -1993,8 +1996,7 @@ mlir::LogicalResult 
CIRToLLVMGlobalOpLowering::matchAndRewrite(
   assert(!cir::MissingFeatures::addressSpace());
   const unsigned addrSpace = 0;
   const bool isDsoLocal = op.getDsoLocal();
-  assert(!cir::MissingFeatures::opGlobalThreadLocal());
-  const bool isThreadLocal = false;
+  const bool isThreadLocal = (bool)op.getTlsModelAttr();
   const uint64_t alignment = op.getAlignment().value_or(0);
   const mlir::LLVM::Linkage linkage = convertLinkage(op.getLinkage());
   const StringRef symbol = op.getSymName();
diff --git a/clang/test/CIR/CodeGen/tls.c b/clang/test/CIR/CodeGen/tls.c
new file mode 100644
index 0000000000000..983b31452b5c4
--- /dev/null
+++ b/clang/test/CIR/CodeGen/tls.c
@@ -0,0 +1,29 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o 
%t.cir
+// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o 
%t.ll
+// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o %t.ogcg.ll
+// RUN: FileCheck --check-prefix=OGCG --input-file=%t.ogcg.ll %s
+
+extern __thread int b;
+// CIR: cir.global "private" external tls_dyn @b : !s32i
+
+__thread int a;
+// CIR: cir.global external tls_dyn @a = #cir.int<0> : !s32i
+
+int c(void) { return *&b; }
+// CIR: cir.func dso_local @c() -> !s32i
+// CIR:   %[[TLS_ADDR:.*]] = cir.get_global thread_local @b : !cir.ptr<!s32i>
+
+// LLVM: @b = external thread_local global i32
+// LLVM: @a = thread_local global i32 0
+
+// LLVM-LABEL: @c
+// LLVM: = call ptr @llvm.threadlocal.address.p0(ptr @b)
+
+// OGCG: @b = external thread_local{{.*}} global i32
+// OGCG: @a = thread_local{{.*}} global i32 0
+
+// OGCG-LABEL: define{{.*}} @c
+// OGCG: call{{.*}} ptr @llvm.threadlocal.address.p0(ptr{{.*}} @b)
+
