https://github.com/AmrDeveloper created 
https://github.com/llvm/llvm-project/pull/160002

Upstream the RTTI builder and its helpers, and use them in the VTable definitions.

Issue https://github.com/llvm/llvm-project/issues/154992
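
For context, and only as an illustration of what the new CIRGenItaniumRTTIBuilder
emits (drawn from the patch itself and the Itanium C++ ABI, not new behavior):
the builder selects the RTTI class for a record from its inheritance shape,
mirroring classical codegen:

    class A { virtual void v(); };      // no bases: abi::__class_type_info
    class B : public A { };             // single public non-virtual base: abi::__si_class_type_info
    class C : virtual public A { };     // virtual or multiple bases: abi::__vmi_class_type_info

The new test clang/test/CIR/CodeGen/vtable-rtti.cpp covers the diamond case (D
derives from B and C, which both virtually derive from A), so _ZTI1D is emitted
as a #cir.typeinfo whose first field points into
_ZTVN10__cxxabiv121__vmi_class_type_infoE.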

>From 87e9469fe835e2f519737a84cde6eb951b393dae Mon Sep 17 00:00:00 2001
From: AmrDeveloper <am...@programmer.net>
Date: Sun, 21 Sep 2025 13:04:32 +0200
Subject: [PATCH] [CIR] Upstream RTTI Builder & RTTI for VTable Definitions

---
 clang/lib/CIR/CodeGen/CIRGenBuilder.h         |    5 +
 clang/lib/CIR/CodeGen/CIRGenCXXABI.h          |    3 +
 clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 1031 +++++++++++++++++
 clang/lib/CIR/CodeGen/CIRGenModule.cpp        |   49 +-
 clang/lib/CIR/CodeGen/CIRGenModule.h          |   29 +
 clang/lib/CIR/CodeGen/CIRGenVTables.cpp       |   43 +
 clang/lib/CIR/CodeGen/CIRGenVTables.h         |    2 +
 .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp |   10 +-
 clang/test/CIR/CodeGen/vtable-rtti.cpp        |  503 ++++++++
 9 files changed, 1669 insertions(+), 6 deletions(-)
 create mode 100644 clang/test/CIR/CodeGen/vtable-rtti.cpp
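
As a rough sketch of the data the new test checks for (field names taken from
the Itanium C++ ABI; this struct is illustrative, not something the patch
defines), the _ZTI1D initializer corresponds to an abi::__vmi_class_type_info
laid out as:

    struct {                      // matches !rec_anon_struct in the test
      const void *vptr;           // _ZTVN10__cxxabiv121__vmi_class_type_infoE, address point at slot 2
      const char *__type_name;    // _ZTS1D
      unsigned __flags;           // 2: __diamond_shaped_mask
      unsigned __base_count;      // 2 direct bases, B and C
      const void *__base_type_b;  // _ZTI1B
      long __offset_flags_b;      // (offset 0 << 8) | __public_mask = 2
      const void *__base_type_c;  // _ZTI1C
      long __offset_flags_c;      // (offset 16 << 8) | __public_mask
    };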

diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
index 6a1746a7ad0ac..b76a15ded641b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h
+++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -89,6 +89,11 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
     return cir::ConstRecordAttr::get(sTy, arrayAttr);
   }
 
+  cir::TypeInfoAttr getTypeInfo(mlir::ArrayAttr fieldsAttr) {
+    auto anonRecord = getAnonConstRecord(fieldsAttr);
+    return cir::TypeInfoAttr::get(anonRecord.getType(), fieldsAttr);
+  }
+
   std::string getUniqueAnonRecordName() { return getUniqueRecordName("anon"); }
 
   std::string getUniqueRecordName(const std::string &baseName) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
index ae922599809b8..1dee77425c30d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h
@@ -114,6 +114,9 @@ class CIRGenCXXABI {
 
   virtual void emitRethrow(CIRGenFunction &cgf, bool isNoReturn) = 0;
 
+  virtual mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc,
+                                                  QualType ty) = 0;
+
   /// Get the type of the implicit "this" parameter used by a method. May return
   /// zero if no specific type is applicable, e.g. if the ABI expects the "this"
   /// parameter to point to some artificial offset in a complete object due to
diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
index 0bf6cf556787c..3bf8dd34f3118 100644
--- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
@@ -103,6 +103,9 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI {
                              const CXXRecordDecl *rd) override;
   void emitVirtualInheritanceTables(const CXXRecordDecl *rd) override;
 
+  mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc,
+                                          QualType ty) override;
+
   bool doStructorsInitializeVPtrs(const CXXRecordDecl *vtableClass) override {
     return true;
   }
@@ -111,6 +114,34 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI {
   getVirtualBaseClassOffset(mlir::Location loc, CIRGenFunction &cgf,
                             Address thisAddr, const CXXRecordDecl *classDecl,
                             const CXXRecordDecl *baseClassDecl) override;
+
+  /**************************** RTTI Uniqueness ******************************/
+protected:
+  /// Returns true if the ABI requires RTTI type_info objects to be unique
+  /// across a program.
+  virtual bool shouldRTTIBeUnique() const { return true; }
+
+public:
+  /// What sort of unique-RTTI behavior should we use?
+  enum RTTIUniquenessKind {
+    /// We are guaranteeing, or need to guarantee, that the RTTI string
+    /// is unique.
+    RUK_Unique,
+
+    /// We are not guaranteeing uniqueness for the RTTI string, so we
+    /// can demote to hidden visibility but must use string comparisons.
+    RUK_NonUniqueHidden,
+
+    /// We are not guaranteeing uniqueness for the RTTI string, so we
+    /// have to use string comparisons, but we also have to emit it with
+    /// non-hidden visibility.
+    RUK_NonUniqueVisible
+  };
+
+  /// Return the required visibility status for the given type and linkage in
+  /// the current ABI.
+  RTTIUniquenessKind
+  classifyRTTIUniqueness(QualType canTy, cir::GlobalLinkageKind linkage) const;
 };
 
 } // namespace
@@ -424,6 +455,1006 @@ void CIRGenItaniumCXXABI::emitVirtualInheritanceTables(
   vtables.emitVTTDefinition(vtt, cgm.getVTableLinkage(rd), rd);
 }
 
+namespace {
+class CIRGenItaniumRTTIBuilder {
+  CIRGenModule &cgm;                 // Per-module state.
+  const CIRGenItaniumCXXABI &cxxABI; // Per-module state.
+
+  /// The fields of the RTTI descriptor currently being built.
+  SmallVector<mlir::Attribute, 16> fields;
+
+  /// Returns the address of the mangled type name of the given type.
+  cir::GlobalOp getAddrOfTypeName(mlir::Location loc, QualType ty,
+                                  cir::GlobalLinkageKind linkage);
+
+  /// Returns the constant for the external RTTI descriptor of the given type.
+  mlir::Attribute getAddrOfExternalRTTIDescriptor(mlir::Location loc,
+                                                  QualType ty);
+
+  /// Build the vtable pointer for the given type.
+  void buildVTablePointer(mlir::Location loc, const Type *ty);
+
+  /// Build an abi::__si_class_type_info, used for single inheritance, according
+  /// to the Itanium C++ ABI, 2.9.5p6b.
+  void buildSIClassTypeInfo(mlir::Location loc, const CXXRecordDecl *rd);
+
+  /// Build an abi::__vmi_class_type_info, used for
+  /// classes with bases that do not satisfy the abi::__si_class_type_info
+  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
+  void buildVMIClassTypeInfo(mlir::Location loc, const CXXRecordDecl *rd);
+
+public:
+  CIRGenItaniumRTTIBuilder(const CIRGenItaniumCXXABI &abi, CIRGenModule &_cgm)
+      : cgm(_cgm), cxxABI(abi) {}
+
+  /// Build the RTTI type info struct for the given type, or
+  /// link to an existing RTTI descriptor if one already exists.
+  mlir::Attribute buildTypeInfo(mlir::Location loc, QualType ty);
+
+  /// Build the RTTI type info struct for the given type.
+  mlir::Attribute buildTypeInfo(mlir::Location loc, QualType ty,
+                                cir::GlobalLinkageKind linkage,
+                                mlir::SymbolTable::Visibility visibility);
+};
+} // namespace
+
+// TODO(cir): Will be removed after sharing them with the classical codegen
+namespace {
+
+// Pointer type info flags.
+enum {
+  /// PTI_Const - Type has const qualifier.
+  PTI_Const = 0x1,
+
+  /// PTI_Volatile - Type has volatile qualifier.
+  PTI_Volatile = 0x2,
+
+  /// PTI_Restrict - Type has restrict qualifier.
+  PTI_Restrict = 0x4,
+
+  /// PTI_Incomplete - Type is incomplete.
+  PTI_Incomplete = 0x8,
+
+  /// PTI_ContainingClassIncomplete - Containing class is incomplete.
+  /// (in pointer to member).
+  PTI_ContainingClassIncomplete = 0x10,
+
+  /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
+  // PTI_TransactionSafe = 0x20,
+
+  /// PTI_Noexcept - Pointee is noexcept function (C++1z).
+  PTI_Noexcept = 0x40,
+};
+
+// VMI type info flags.
+enum {
+  /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
+  VMI_NonDiamondRepeat = 0x1,
+
+  /// VMI_DiamondShaped - Class is diamond shaped.
+  VMI_DiamondShaped = 0x2
+};
+
+// Base class type info flags.
+enum {
+  /// BCTI_Virtual - Base class is virtual.
+  BCTI_Virtual = 0x1,
+
+  /// BCTI_Public - Base class is public.
+  BCTI_Public = 0x2
+};
+
+/// Given a builtin type, returns whether the type
+/// info for that type is defined in the standard library.
+/// TODO(cir): this can be unified with LLVM codegen
+static bool TypeInfoIsInStandardLibrary(const BuiltinType *ty) {
+  // Itanium C++ ABI 2.9.2:
+  //   Basic type information (e.g. for "int", "bool", etc.) will be kept in
+  //   the run-time support library. Specifically, the run-time support
+  //   library should contain type_info objects for the types X, X* and
+  //   X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
+  //   unsigned char, signed char, short, unsigned short, int, unsigned int,
+  //   long, unsigned long, long long, unsigned long long, float, double,
+  //   long double, char16_t, char32_t, and the IEEE 754r decimal and
+  //   half-precision floating point types.
+  //
+  // GCC also emits RTTI for __int128.
+  // FIXME: We do not emit RTTI information for decimal types here.
+
+  // Types added here must also be added to EmitFundamentalRTTIDescriptors.
+  switch (ty->getKind()) {
+  case BuiltinType::WasmExternRef:
+  case BuiltinType::HLSLResource:
+    llvm_unreachable("NYI");
+  case BuiltinType::Void:
+  case BuiltinType::NullPtr:
+  case BuiltinType::Bool:
+  case BuiltinType::WChar_S:
+  case BuiltinType::WChar_U:
+  case BuiltinType::Char_U:
+  case BuiltinType::Char_S:
+  case BuiltinType::UChar:
+  case BuiltinType::SChar:
+  case BuiltinType::Short:
+  case BuiltinType::UShort:
+  case BuiltinType::Int:
+  case BuiltinType::UInt:
+  case BuiltinType::Long:
+  case BuiltinType::ULong:
+  case BuiltinType::LongLong:
+  case BuiltinType::ULongLong:
+  case BuiltinType::Half:
+  case BuiltinType::Float:
+  case BuiltinType::Double:
+  case BuiltinType::LongDouble:
+  case BuiltinType::Float16:
+  case BuiltinType::Float128:
+  case BuiltinType::Ibm128:
+  case BuiltinType::Char8:
+  case BuiltinType::Char16:
+  case BuiltinType::Char32:
+  case BuiltinType::Int128:
+  case BuiltinType::UInt128:
+    return true;
+
+#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix)                  \
+  case BuiltinType::Id:
+#include "clang/Basic/OpenCLImageTypes.def"
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) case BuiltinType::Id:
+#include "clang/Basic/OpenCLExtensionTypes.def"
+  case BuiltinType::OCLSampler:
+  case BuiltinType::OCLEvent:
+  case BuiltinType::OCLClkEvent:
+  case BuiltinType::OCLQueue:
+  case BuiltinType::OCLReserveID:
+#define SVE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/AArch64ACLETypes.def"
+#define PPC_VECTOR_TYPE(Name, Id, Size) case BuiltinType::Id:
+#include "clang/Basic/PPCTypes.def"
+#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/RISCVVTypes.def"
+#define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id:
+#include "clang/Basic/AMDGPUTypes.def"
+  case BuiltinType::ShortAccum:
+  case BuiltinType::Accum:
+  case BuiltinType::LongAccum:
+  case BuiltinType::UShortAccum:
+  case BuiltinType::UAccum:
+  case BuiltinType::ULongAccum:
+  case BuiltinType::ShortFract:
+  case BuiltinType::Fract:
+  case BuiltinType::LongFract:
+  case BuiltinType::UShortFract:
+  case BuiltinType::UFract:
+  case BuiltinType::ULongFract:
+  case BuiltinType::SatShortAccum:
+  case BuiltinType::SatAccum:
+  case BuiltinType::SatLongAccum:
+  case BuiltinType::SatUShortAccum:
+  case BuiltinType::SatUAccum:
+  case BuiltinType::SatULongAccum:
+  case BuiltinType::SatShortFract:
+  case BuiltinType::SatFract:
+  case BuiltinType::SatLongFract:
+  case BuiltinType::SatUShortFract:
+  case BuiltinType::SatUFract:
+  case BuiltinType::SatULongFract:
+  case BuiltinType::BFloat16:
+    return false;
+
+  case BuiltinType::Dependent:
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
+    llvm_unreachable("asking for RTTI for a placeholder type!");
+
+  case BuiltinType::ObjCId:
+  case BuiltinType::ObjCClass:
+  case BuiltinType::ObjCSel:
+    llvm_unreachable("FIXME: Objective-C types are unsupported!");
+  }
+
+  llvm_unreachable("Invalid BuiltinType Kind!");
+}
+
+static bool TypeInfoIsInStandardLibrary(const PointerType *pointerTy) {
+  QualType pointeeTy = pointerTy->getPointeeType();
+  const auto *builtinTy = dyn_cast<BuiltinType>(pointeeTy);
+  if (!builtinTy)
+    return false;
+
+  // Check the qualifiers.
+  Qualifiers quals = pointeeTy.getQualifiers();
+  quals.removeConst();
+
+  if (!quals.empty())
+    return false;
+
+  return TypeInfoIsInStandardLibrary(builtinTy);
+}
+
+/// IsStandardLibraryRTTIDescriptor - Returns whether the type
+/// information for the given type exists in the standard library.
+static bool IsStandardLibraryRTTIDescriptor(QualType ty) {
+  // Type info for builtin types is defined in the standard library.
+  if (const auto *builtinTy = dyn_cast<BuiltinType>(ty))
+    return TypeInfoIsInStandardLibrary(builtinTy);
+
+  // Type info for some pointer types to builtin types is defined in the
+  // standard library.
+  if (const auto *pointerTy = dyn_cast<PointerType>(ty))
+    return TypeInfoIsInStandardLibrary(pointerTy);
+
+  return false;
+}
+
+/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
+/// the given type exists somewhere else, and that we should not emit the type
+/// information in this translation unit.  Assumes that it is not a
+/// standard-library type.
+static bool ShouldUseExternalRTTIDescriptor(CIRGenModule &cgm, QualType ty) {
+  ASTContext &context = cgm.getASTContext();
+
+  // If RTTI is disabled, assume it might be disabled in the
+  // translation unit that defines any potential key function, too.
+  if (!context.getLangOpts().RTTI)
+    return false;
+
+  if (const auto *recordTy = dyn_cast<RecordType>(ty)) {
+    const CXXRecordDecl *rd =
+        cast<CXXRecordDecl>(recordTy->getOriginalDecl())->getDefinitionOrSelf();
+    if (!rd->hasDefinition())
+      return false;
+
+    if (!rd->isDynamicClass())
+      return false;
+
+    // FIXME: this may need to be reconsidered if the key function
+    // changes.
+    // N.B. We must always emit the RTTI data ourselves if there exists a key
+    // function.
+    bool isDLLImport = rd->hasAttr<DLLImportAttr>();
+
+    // Don't import the RTTI but emit it locally.
+    if (cgm.getTriple().isOSCygMing())
+      return false;
+
+    if (cgm.getVTables().isVTableExternal(rd)) {
+      if (cgm.getTarget().hasPS4DLLImportExport())
+        return true;
+
+      return !isDLLImport || cgm.getTriple().isWindowsItaniumEnvironment();
+    }
+
+    if (isDLLImport)
+      return true;
+  }
+
+  return false;
+}
+
+/// Contains virtual and non-virtual bases seen when traversing a class
+/// hierarchy.
+struct SeenBases {
+  llvm::SmallPtrSet<const CXXRecordDecl *, 16> nonVirtualBases;
+  llvm::SmallPtrSet<const CXXRecordDecl *, 16> virtualBases;
+};
+
+/// Compute the value of the flags member in abi::__vmi_class_type_info.
+///
+static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *base,
+                                             SeenBases &bases) {
+
+  unsigned flags = 0;
+  auto *baseDecl = base->getType()->castAsCXXRecordDecl();
+
+  if (base->isVirtual()) {
+    // Mark the virtual base as seen.
+    if (!bases.virtualBases.insert(baseDecl).second) {
+      // If this virtual base has been seen before, then the class is diamond
+      // shaped.
+      flags |= VMI_DiamondShaped;
+    } else {
+      if (bases.nonVirtualBases.count(baseDecl))
+        flags |= VMI_NonDiamondRepeat;
+    }
+  } else {
+    // Mark the non-virtual base as seen.
+    if (!bases.nonVirtualBases.insert(baseDecl).second) {
+      // If this non-virtual base has been seen before, then the class has non-
+      // diamond shaped repeated inheritance.
+      flags |= VMI_NonDiamondRepeat;
+    } else {
+      if (bases.virtualBases.count(baseDecl))
+        flags |= VMI_NonDiamondRepeat;
+    }
+  }
+
+  // Walk all bases.
+  for (const auto &bs : baseDecl->bases())
+    flags |= ComputeVMIClassTypeInfoFlags(&bs, bases);
+
+  return flags;
+}
+
+static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *rd) {
+  unsigned flags = 0;
+  SeenBases bases;
+
+  // Walk all bases.
+  for (const auto &bs : rd->bases())
+    flags |= ComputeVMIClassTypeInfoFlags(&bs, bases);
+
+  return flags;
+}
+
+// Return whether the given record decl has a "single,
+// public, non-virtual base at offset zero (i.e. the derived class is dynamic
+// iff the base is)", according to Itanium C++ ABI, 2.9.5p6b.
+// TODO(cir): this can be unified with LLVM codegen
+static bool CanUseSingleInheritance(const CXXRecordDecl *rd) {
+  // Check the number of bases.
+  if (rd->getNumBases() != 1)
+    return false;
+
+  // Get the base.
+  CXXRecordDecl::base_class_const_iterator base = rd->bases_begin();
+
+  // Check that the base is not virtual.
+  if (base->isVirtual())
+    return false;
+
+  // Check that the base is public.
+  if (base->getAccessSpecifier() != AS_public)
+    return false;
+
+  // Check that the class is dynamic iff the base is.
+  auto *baseDecl = base->getType()->castAsCXXRecordDecl();
+  return baseDecl->isEmpty() ||
+         baseDecl->isDynamicClass() == rd->isDynamicClass();
+}
+
+/// IsIncompleteClassType - Returns whether the given record type is incomplete.
+static bool IsIncompleteClassType(const RecordType *recordTy) {
+  return !recordTy->getOriginalDecl()
+              ->getDefinitionOrSelf()
+              ->isCompleteDefinition();
+}
+
+/// Returns whether the given type contains an
+/// incomplete class type. This is true if
+///
+///   * The given type is an incomplete class type.
+///   * The given type is a pointer type whose pointee type contains an
+///     incomplete class type.
+///   * The given type is a member pointer type whose class is an incomplete
+///     class type.
+///   * The given type is a member pointer type whose pointee type contains an
+///     incomplete class type.
+///
+static bool ContainsIncompleteClassType(QualType ty) {
+  if (const auto *recordTy = dyn_cast<RecordType>(ty)) {
+    if (IsIncompleteClassType(recordTy))
+      return true;
+  }
+
+  if (const auto *pointerTy = dyn_cast<PointerType>(ty))
+    return ContainsIncompleteClassType(pointerTy->getPointeeType());
+
+  if (const auto *memberPointerTy = dyn_cast<MemberPointerType>(ty)) {
+    // Check if the class type is incomplete.
+    if (!memberPointerTy->getMostRecentCXXRecordDecl()->hasDefinition())
+      return true;
+
+    return ContainsIncompleteClassType(memberPointerTy->getPointeeType());
+  }
+
+  return false;
+}
+
+/// Return the linkage that the type info and type info name constants
+/// should have for the given type.
+static cir::GlobalLinkageKind getTypeInfoLinkage(CIRGenModule &cgm,
+                                                 QualType ty) {
+  // Itanium C++ ABI 2.9.5p7:
+  //   In addition, it and all of the intermediate abi::__pointer_type_info
+  //   structs in the chain down to the abi::__class_type_info for the
+  //   incomplete class type must be prevented from resolving to the
+  //   corresponding type_info structs for the complete class type, possibly
+  //   by making them local static objects. Finally, a dummy class RTTI is
+  //   generated for the incomplete type that will not resolve to the final
+  //   complete class RTTI (because the latter need not exist), possibly by
+  //   making it a local static object.
+  if (ContainsIncompleteClassType(ty))
+    return cir::GlobalLinkageKind::InternalLinkage;
+
+  switch (ty->getLinkage()) {
+  case Linkage::Invalid:
+    llvm_unreachable("Linkage hasn't been computed!");
+
+  case Linkage::None:
+  case Linkage::Internal:
+  case Linkage::UniqueExternal:
+    return cir::GlobalLinkageKind::InternalLinkage;
+
+  case Linkage::VisibleNone:
+  case Linkage::Module:
+  case Linkage::External:
+    // RTTI is not enabled, which means that this type info struct is going
+    // to be used for exception handling. Give it linkonce_odr linkage.
+    if (!cgm.getLangOpts().RTTI)
+      return cir::GlobalLinkageKind::LinkOnceODRLinkage;
+
+    if (const RecordType *record = dyn_cast<RecordType>(ty)) {
+      const CXXRecordDecl *rd =
+          cast<CXXRecordDecl>(record->getOriginalDecl())->getDefinitionOrSelf();
+      if (rd->hasAttr<WeakAttr>())
+        return cir::GlobalLinkageKind::WeakODRLinkage;
+
+      if (cgm.getTriple().isWindowsItaniumEnvironment())
+        if (rd->hasAttr<DLLImportAttr>() &&
+            ShouldUseExternalRTTIDescriptor(cgm, ty))
+          return cir::GlobalLinkageKind::ExternalLinkage;
+
+      // MinGW always uses LinkOnceODRLinkage for type info.
+      if (rd->isDynamicClass() && !cgm.getASTContext()
+                                       .getTargetInfo()
+                                       .getTriple()
+                                       .isWindowsGNUEnvironment())
+        return cgm.getVTableLinkage(rd);
+    }
+
+    return cir::GlobalLinkageKind::LinkOnceODRLinkage;
+  }
+
+  llvm_unreachable("Invalid linkage!");
+}
+} // namespace
+
+// FIXME: Check please
+cir::GlobalOp
+CIRGenItaniumRTTIBuilder::getAddrOfTypeName(mlir::Location loc, QualType ty,
+                                            cir::GlobalLinkageKind linkage) {
+  auto &builder = cgm.getBuilder();
+  SmallString<256> name;
+  llvm::raw_svector_ostream out(name);
+  cgm.getCXXABI().getMangleContext().mangleCXXRTTIName(ty, out);
+
+  // We know that the mangled name of the type starts at index 4 of the
+  // mangled name of the typename, so we can just index into it in order to
+  // get the mangled name of the type.
+  mlir::Attribute init = builder.getString(
+      name.substr(4), cgm.convertType(cgm.getASTContext().CharTy),
+      std::nullopt);
+
+  CharUnits align =
+      cgm.getASTContext().getTypeAlignInChars(cgm.getASTContext().CharTy);
+
+  // builder.getString can return a #cir.zero if the string given to it only
+  // contains null bytes. However, type names cannot be full of null bytes.
+  // So casting init to a ConstArrayAttr should be safe.
+  auto initStr = cast<cir::ConstArrayAttr>(init);
+
+  cir::GlobalOp gv = cgm.createOrReplaceCXXRuntimeVariable(
+      loc, name, initStr.getType(), linkage, align);
+  CIRGenModule::setInitializer(gv, init);
+  return gv;
+}
+
+mlir::Attribute
+CIRGenItaniumRTTIBuilder::getAddrOfExternalRTTIDescriptor(mlir::Location loc,
+                                                          QualType ty) {
+  // Mangle the RTTI name.
+  SmallString<256> name;
+  llvm::raw_svector_ostream out(name);
+  cgm.getCXXABI().getMangleContext().mangleCXXRTTI(ty, out);
+  CIRGenBuilderTy &builder = cgm.getBuilder();
+
+  // Look for an existing global.
+  cir::GlobalOp gv = dyn_cast_or_null<cir::GlobalOp>(
+      mlir::SymbolTable::lookupSymbolIn(cgm.getModule(), name));
+
+  if (!gv) {
+    // Create a new global variable.
+    // From LLVM codegen => Note for the future: If we would ever like to do
+    // deferred emission of RTTI, check if emitting vtables opportunistically
+    // needs any adjustment.
+    gv = CIRGenModule::createGlobalOp(cgm, loc, name, builder.getUInt8PtrTy(),
+                                      /*isConstant=*/true);
+    const CXXRecordDecl *rd = ty->getAsCXXRecordDecl();
+    cgm.setGVProperties(gv, rd);
+
+    // Import the typeinfo symbol when all non-inline virtual methods are
+    // imported.
+    if (cgm.getTarget().hasPS4DLLImportExport())
+      llvm_unreachable("NYI");
+  }
+
+  return builder.getGlobalViewAttr(builder.getUInt8PtrTy(), gv);
+}
+
+// FIXME: Split this function
+void CIRGenItaniumRTTIBuilder::buildVTablePointer(mlir::Location loc,
+                                                  const Type *ty) {
+  auto &builder = cgm.getBuilder();
+
+  // abi::__class_type_info.
+  static const char *const ClassTypeInfo =
+      "_ZTVN10__cxxabiv117__class_type_infoE";
+  // abi::__si_class_type_info.
+  static const char *const SIClassTypeInfo =
+      "_ZTVN10__cxxabiv120__si_class_type_infoE";
+  // abi::__vmi_class_type_info.
+  static const char *const VMIClassTypeInfo =
+      "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
+
+  const char *vTableName = nullptr;
+
+  switch (ty->getTypeClass()) {
+  case Type::ArrayParameter:
+  case Type::HLSLAttributedResource:
+  case Type::HLSLInlineSpirv:
+    llvm_unreachable("NYI");
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.inc"
+    llvm_unreachable("Non-canonical and dependent types shouldn't get here");
+
+  case Type::LValueReference:
+  case Type::RValueReference:
+    llvm_unreachable("References shouldn't get here");
+
+  case Type::Auto:
+  case Type::DeducedTemplateSpecialization:
+    llvm_unreachable("Undeduced type shouldn't get here");
+
+  case Type::Pipe:
+    llvm_unreachable("Pipe types shouldn't get here");
+
+  case Type::Builtin:
+  case Type::BitInt:
+  // GCC treats vector and complex types as fundamental types.
+  case Type::Vector:
+  case Type::ExtVector:
+  case Type::ConstantMatrix:
+  case Type::Complex:
+  case Type::Atomic:
+  // FIXME: GCC treats block pointers as fundamental types?!
+  case Type::BlockPointer:
+    // abi::__fundamental_type_info.
+    vTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
+    break;
+
+  case Type::ConstantArray:
+  case Type::IncompleteArray:
+  case Type::VariableArray:
+    // abi::__array_type_info.
+    vTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
+    break;
+
+  case Type::FunctionNoProto:
+  case Type::FunctionProto:
+    // abi::__function_type_info.
+    vTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
+    break;
+
+  case Type::Enum:
+    // abi::__enum_type_info.
+    vTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
+    break;
+
+  case Type::Record: {
+    const CXXRecordDecl *rd =
+        cast<CXXRecordDecl>(cast<RecordType>(ty)->getOriginalDecl())
+            ->getDefinitionOrSelf();
+
+    if (!rd->hasDefinition() || !rd->getNumBases()) {
+      vTableName = ClassTypeInfo;
+    } else if (CanUseSingleInheritance(rd)) {
+      vTableName = SIClassTypeInfo;
+    } else {
+      vTableName = VMIClassTypeInfo;
+    }
+
+    break;
+  }
+
+  case Type::ObjCObject:
+    // Ignore protocol qualifiers.
+    ty = cast<ObjCObjectType>(ty)->getBaseType().getTypePtr();
+
+    // Handle id and Class.
+    if (isa<BuiltinType>(ty)) {
+      vTableName = ClassTypeInfo;
+      break;
+    }
+
+    assert(isa<ObjCInterfaceType>(ty));
+    [[fallthrough]];
+
+  case Type::ObjCInterface:
+    if (cast<ObjCInterfaceType>(ty)->getDecl()->getSuperClass()) {
+      vTableName = SIClassTypeInfo;
+    } else {
+      vTableName = ClassTypeInfo;
+    }
+    break;
+
+  case Type::ObjCObjectPointer:
+  case Type::Pointer:
+    // abi::__pointer_type_info.
+    vTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
+    break;
+
+  case Type::MemberPointer:
+    // abi::__pointer_to_member_type_info.
+    vTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
+    break;
+  }
+
+  cir::GlobalOp vTable{};
+
+  // Check if the alias exists. If it doesn't, then get or create the global.
+  if (cgm.getItaniumVTableContext().isRelativeLayout())
+    llvm_unreachable("NYI");
+  if (!vTable) {
+    vTable = cgm.getOrInsertGlobal(loc, vTableName,
+                                   cgm.getBuilder().getUInt8PtrTy());
+  }
+
+  // The vtable address point is 2.
+  mlir::Attribute field{};
+  if (cgm.getItaniumVTableContext().isRelativeLayout()) {
+    llvm_unreachable("NYI");
+  } else {
+    SmallVector<mlir::Attribute, 4> offsets{
+        cgm.getBuilder().getI32IntegerAttr(2)};
+    auto indices = mlir::ArrayAttr::get(builder.getContext(), offsets);
+    field = cgm.getBuilder().getGlobalViewAttr(cgm.getBuilder().getUInt8PtrTy(),
+                                               vTable, indices);
+  }
+
+  assert(field && "expected attribute");
+  fields.push_back(field);
+}
+
+/// Build an abi::__si_class_type_info, used for single inheritance, according
+/// to the Itanium C++ ABI, 2.9.5p6b.
+void CIRGenItaniumRTTIBuilder::buildSIClassTypeInfo(mlir::Location loc,
+                                                    const CXXRecordDecl *rd) {
+  // Itanium C++ ABI 2.9.5p6b:
+  // It adds to abi::__class_type_info a single member pointing to the
+  // type_info structure for the base type,
+  auto baseTypeInfo = CIRGenItaniumRTTIBuilder(cxxABI, cgm)
+                          .buildTypeInfo(loc, rd->bases_begin()->getType());
+  fields.push_back(baseTypeInfo);
+}
+
+/// Build an abi::__vmi_class_type_info, used for
+/// classes with bases that do not satisfy the abi::__si_class_type_info
+/// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
+void CIRGenItaniumRTTIBuilder::buildVMIClassTypeInfo(mlir::Location loc,
+                                                     const CXXRecordDecl *rd) {
+  mlir::Type unsignedIntLTy =
+      cgm.convertType(cgm.getASTContext().UnsignedIntTy);
+
+  // Itanium C++ ABI 2.9.5p6c:
+  //   __flags is a word with flags describing details about the class
+  //   structure, which may be referenced by using the __flags_masks
+  //   enumeration. These flags refer to both direct and indirect bases.
+  unsigned flags = ComputeVMIClassTypeInfoFlags(rd);
+  fields.push_back(cir::IntAttr::get(unsignedIntLTy, flags));
+
+  // Itanium C++ ABI 2.9.5p6c:
+  //   __base_count is a word with the number of direct proper base class
+  //   descriptions that follow.
+  fields.push_back(cir::IntAttr::get(unsignedIntLTy, rd->getNumBases()));
+
+  if (!rd->getNumBases())
+    return;
+
+  // Now add the base class descriptions.
+
+  // Itanium C++ ABI 2.9.5p6c:
+  //   __base_info[] is an array of base class descriptions -- one for every
+  //   direct proper base. Each description is of the type:
+  //
+  //   struct abi::__base_class_type_info {
+  //   public:
+  //     const __class_type_info *__base_type;
+  //     long __offset_flags;
+  //
+  //     enum __offset_flags_masks {
+  //       __virtual_mask = 0x1,
+  //       __public_mask = 0x2,
+  //       __offset_shift = 8
+  //     };
+  //   };
+
+  // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
+  // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
+  // LLP64 platforms.
+  // FIXME: Consider updating libc++abi to match, and extend this logic to all
+  // LLP64 platforms.
+  QualType offsetFlagsTy = cgm.getASTContext().LongTy;
+  const TargetInfo &TI = cgm.getASTContext().getTargetInfo();
+  if (TI.getTriple().isOSCygMing() &&
+      TI.getPointerWidth(LangAS::Default) > TI.getLongWidth())
+    offsetFlagsTy = cgm.getASTContext().LongLongTy;
+  mlir::Type offsetFlagsLTy = cgm.convertType(offsetFlagsTy);
+
+  for (const auto &base : rd->bases()) {
+    // The __base_type member points to the RTTI for the base type.
+    fields.push_back(CIRGenItaniumRTTIBuilder(cxxABI, cgm)
+                         .buildTypeInfo(loc, base.getType()));
+
+    CXXRecordDecl *baseDecl = base.getType()->castAsCXXRecordDecl();
+    int64_t offsetFlags = 0;
+
+    // All but the lower 8 bits of __offset_flags are a signed offset.
+    // For a non-virtual base, this is the offset in the object of the base
+    // subobject. For a virtual base, this is the offset in the virtual table of
+    // the virtual base offset for the virtual base referenced (negative).
+    CharUnits offset;
+    if (base.isVirtual())
+      offset = cgm.getItaniumVTableContext().getVirtualBaseOffsetOffset(
+          rd, baseDecl);
+    else {
+      const ASTRecordLayout &layout =
+          cgm.getASTContext().getASTRecordLayout(rd);
+      offset = layout.getBaseClassOffset(baseDecl);
+    }
+    offsetFlags = uint64_t(offset.getQuantity()) << 8;
+
+    // The low-order byte of __offset_flags contains flags, as given by the
+    // masks from the enumeration __offset_flags_masks.
+    if (base.isVirtual())
+      offsetFlags |= BCTI_Virtual;
+    if (base.getAccessSpecifier() == AS_public)
+      offsetFlags |= BCTI_Public;
+
+    fields.push_back(cir::IntAttr::get(offsetFlagsLTy, offsetFlags));
+  }
+}
+
+mlir::Attribute CIRGenItaniumRTTIBuilder::buildTypeInfo(mlir::Location loc,
+                                                        QualType ty) {
+  // We want to operate on the canonical type.
+  ty = ty.getCanonicalType();
+
+  // Check if we've already emitted an RTTI descriptor for this type.
+  SmallString<256> name;
+  llvm::raw_svector_ostream out(name);
+  cgm.getCXXABI().getMangleContext().mangleCXXRTTI(ty, out);
+
+  auto oldGV = dyn_cast_or_null<cir::GlobalOp>(
+      mlir::SymbolTable::lookupSymbolIn(cgm.getModule(), name));
+
+  if (oldGV && !oldGV.isDeclaration()) {
+    assert(!oldGV.hasAvailableExternallyLinkage() &&
+           "available_externally typeinfos not yet implemented");
+    return cgm.getBuilder().getGlobalViewAttr(cgm.getBuilder().getUInt8PtrTy(),
+                                              oldGV);
+  }
+
+  // Check if there is already an external RTTI descriptor for this type.
+  if (IsStandardLibraryRTTIDescriptor(ty) ||
+      ShouldUseExternalRTTIDescriptor(cgm, ty))
+    return getAddrOfExternalRTTIDescriptor(loc, ty);
+
+  // Emit the standard library with external linkage.
+  cir::GlobalLinkageKind linkage = getTypeInfoLinkage(cgm, ty);
+
+  // Give the type_info object and name the formal visibility of the
+  // type itself.
+  assert(!cir::MissingFeatures::hiddenVisibility());
+  assert(!cir::MissingFeatures::protectedVisibility());
+
+  mlir::SymbolTable::Visibility symVisibility;
+  if (cir::isLocalLinkage(linkage))
+    // If the linkage is local, only default visibility makes sense.
+    symVisibility = mlir::SymbolTable::Visibility::Public;
+  else if (cxxABI.classifyRTTIUniqueness(ty, linkage) ==
+           CIRGenItaniumCXXABI::RUK_NonUniqueHidden) {
+    cgm.errorNYI(
+        "buildTypeInfo: classifyRTTIUniqueness == RUK_NonUniqueHidden");
+    symVisibility = CIRGenModule::getCIRVisibility(ty->getVisibility());
+  } else
+    symVisibility = CIRGenModule::getCIRVisibility(ty->getVisibility());
+
+  return buildTypeInfo(loc, ty, linkage, symVisibility);
+}
+
+mlir::Attribute CIRGenItaniumRTTIBuilder::buildTypeInfo(
+    mlir::Location loc, QualType ty, cir::GlobalLinkageKind linkage,
+    mlir::SymbolTable::Visibility visibility) {
+  CIRGenBuilderTy &builder = cgm.getBuilder();
+
+  // Add the vtable pointer.
+  buildVTablePointer(loc, cast<Type>(ty));
+
+  // And the name.
+  cir::GlobalOp typeName = getAddrOfTypeName(loc, ty, linkage);
+  mlir::Attribute typeNameField;
+
+  // If we're supposed to demote the visibility, be sure to set a flag
+  // to use a string comparison for type_info comparisons.
+  // FIXME: RTTIUniquenessKind
+
+  typeNameField = builder.getGlobalViewAttr(builder.getUInt8PtrTy(), typeName);
+  fields.push_back(typeNameField);
+
+  switch (ty->getTypeClass()) {
+  case Type::ArrayParameter:
+  case Type::HLSLAttributedResource:
+  case Type::HLSLInlineSpirv:
+    llvm_unreachable("NYI");
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.inc"
+    llvm_unreachable("Non-canonical and dependent types shouldn't get here");
+
+    // GCC treats vector types as fundamental types.
+  case Type::Builtin:
+  case Type::Vector:
+  case Type::ExtVector:
+  case Type::ConstantMatrix:
+  case Type::Complex:
+  case Type::BlockPointer:
+    // Itanium C++ ABI 2.9.5p4:
+    // abi::__fundamental_type_info adds no data members to std::type_info.
+    break;
+
+  case Type::LValueReference:
+  case Type::RValueReference:
+    llvm_unreachable("References shouldn't get here");
+
+  case Type::Auto:
+  case Type::DeducedTemplateSpecialization:
+    llvm_unreachable("Undeduced type shouldn't get here");
+
+  case Type::Pipe:
+    break;
+
+  case Type::BitInt:
+    break;
+
+  case Type::ConstantArray:
+  case Type::IncompleteArray:
+  case Type::VariableArray:
+    // Itanium C++ ABI 2.9.5p5:
+    // abi::__array_type_info adds no data members to std::type_info.
+    break;
+
+  case Type::FunctionNoProto:
+  case Type::FunctionProto:
+    // Itanium C++ ABI 2.9.5p5:
+    // abi::__function_type_info adds no data members to std::type_info.
+    break;
+
+  case Type::Enum:
+    // Itanium C++ ABI 2.9.5p5:
+    // abi::__enum_type_info adds no data members to std::type_info.
+    break;
+
+  case Type::Record: {
+    const auto *rd =
+        cast<CXXRecordDecl>(cast<RecordType>(ty)->getOriginalDecl())
+            ->getDefinitionOrSelf();
+    if (!rd->hasDefinition() || !rd->getNumBases()) {
+      // We don't need to emit any fields.
+      break;
+    }
+
+    if (CanUseSingleInheritance(rd)) {
+      buildSIClassTypeInfo(loc, rd);
+    } else {
+      buildVMIClassTypeInfo(loc, rd);
+    }
+
+    break;
+  }
+
+  case Type::ObjCObject:
+  case Type::ObjCInterface:
+    llvm_unreachable("NYI");
+    break;
+
+  case Type::ObjCObjectPointer:
+    llvm_unreachable("NYI");
+    break;
+
+  case Type::Pointer:
+    llvm_unreachable("NYI");
+    break;
+
+  case Type::MemberPointer:
+    llvm_unreachable("NYI");
+    break;
+
+  case Type::Atomic:
+    // No fields, at least for the moment.
+    break;
+  }
+
+  auto init = builder.getTypeInfo(builder.getArrayAttr(fields));
+
+  SmallString<256> name;
+  llvm::raw_svector_ostream out(name);
+  cgm.getCXXABI().getMangleContext().mangleCXXRTTI(ty, out);
+
+  // Create new global and search for an existing global.
+  auto oldGV = dyn_cast_or_null<cir::GlobalOp>(
+      mlir::SymbolTable::lookupSymbolIn(cgm.getModule(), name));
+  cir::GlobalOp gv =
+      CIRGenModule::createGlobalOp(cgm, loc, name, init.getType(),
+                                   /*isConstant=*/true);
+
+  // Export the typeinfo in the same circumstances as the vtable is
+  // exported.
+  if (cgm.getTarget().hasPS4DLLImportExport())
+    llvm_unreachable("NYI");
+
+  // If there's already an old global variable, replace it with the new one.
+  if (oldGV) {
+    // Replace occurrences of the old variable if needed.
+    gv.setName(oldGV.getName());
+    if (!oldGV->use_empty()) {
+      // TODO: replaceAllUsesWith
+      llvm_unreachable("NYI");
+    }
+    oldGV->erase();
+  }
+
+  if (cgm.supportsCOMDAT() && cir::isWeakForLinker(gv.getLinkage())) {
+    llvm_unreachable("NYI");
+  }
+
+  mlir::SymbolTable::setSymbolVisibility(
+      typeName, CIRGenModule::getMLIRVisibility(typeName));
+  CIRGenModule::setInitializer(gv, init);
+
+  return builder.getGlobalViewAttr(builder.getUInt8PtrTy(), gv);
+}
+
+mlir::Attribute CIRGenItaniumCXXABI::getAddrOfRTTIDescriptor(mlir::Location loc,
+                                                             QualType ty) {
+  return CIRGenItaniumRTTIBuilder(*this, cgm).buildTypeInfo(loc, ty);
+}
+
+/// What sort of uniqueness rules should we use for the RTTI for the
+/// given type?
+CIRGenItaniumCXXABI::RTTIUniquenessKind
+CIRGenItaniumCXXABI::classifyRTTIUniqueness(
+    QualType canTy, cir::GlobalLinkageKind linkage) const {
+  if (shouldRTTIBeUnique())
+    return RUK_Unique;
+
+  // It's only necessary for linkonce_odr or weak_odr linkage.
+  if (linkage != cir::GlobalLinkageKind::LinkOnceODRLinkage &&
+      linkage != cir::GlobalLinkageKind::WeakODRLinkage)
+    return RUK_Unique;
+
+  // It's only necessary with default visibility.
+  if (canTy->getVisibility() != DefaultVisibility)
+    return RUK_Unique;
+
+  // If we're not required to publish this symbol, hide it.
+  if (linkage == cir::GlobalLinkageKind::LinkOnceODRLinkage)
+    return RUK_NonUniqueHidden;
+
+  // If we're required to publish this symbol, as we might be under an
+  // explicit instantiation, leave it with default visibility but
+  // enable string-comparisons.
+  assert(linkage == cir::GlobalLinkageKind::WeakODRLinkage);
+  return RUK_NonUniqueVisible;
+}
+
 void CIRGenItaniumCXXABI::emitDestructorCall(
     CIRGenFunction &cgf, const CXXDestructorDecl *dd, CXXDtorType type,
     bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index eef23a0ebda7f..ea150a2989c39 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -2171,8 +2171,53 @@ mlir::Attribute CIRGenModule::getAddrOfRTTIDescriptor(mlir::Location loc,
   if (!shouldEmitRTTI(forEh))
     return builder.getConstNullPtrAttr(builder.getUInt8PtrTy());
 
-  errorNYI(loc, "getAddrOfRTTIDescriptor");
-  return mlir::Attribute();
+  if (forEh && ty->isObjCObjectPointerType() &&
+      langOpts.ObjCRuntime.isGNUFamily()) {
+    errorNYI(loc, "getAddrOfRTTIDescriptor: ObjC ptr type & ObjC GNU runtime");
+    return {};
+  }
+
+  return getCXXABI().getAddrOfRTTIDescriptor(loc, ty);
+}
+
+/// TODO(cir): once we have cir.module, add this as a convenience method there.
+///
+/// Look up the specified global in the module symbol table.
+///   1. If it does not exist, add a declaration of the global and return it.
+///   2. Else, the global exists but has the wrong type: return the global
+///      with a constantexpr cast to the right type.
+///   3. Finally, if the existing global is the correct declaration, return the
+///      existing global.
+cir::GlobalOp CIRGenModule::getOrInsertGlobal(
+    mlir::Location loc, StringRef name, mlir::Type ty,
+    llvm::function_ref<cir::GlobalOp()> createGlobalCallback) {
+  // See if we have a definition for the specified global already.
+  auto gv = dyn_cast_or_null<cir::GlobalOp>(getGlobalValue(name));
+  if (!gv) {
+    gv = createGlobalCallback();
+  }
+  assert(gv && "the createGlobalCallback is expected to create a global");
+
+  // If the variable exists but has the wrong type, return a bitcast to the
+  // right type.
+  auto gvTy = gv.getSymType();
+  assert(!cir::MissingFeatures::addressSpace());
+  auto pTy = builder.getPointerTo(ty);
+
+  if (gvTy != pTy)
+    llvm_unreachable("NYI");
+
+  // Otherwise, we just found the existing function or a prototype.
+  return gv;
+}
+
+// Overload to construct a global variable using its constructor's defaults.
+cir::GlobalOp CIRGenModule::getOrInsertGlobal(mlir::Location loc,
+                                              StringRef name, mlir::Type ty) {
+  return getOrInsertGlobal(loc, name, ty, [&] {
+    return CIRGenModule::createGlobalOp(*this, loc, name,
+                                        builder.getPointerTo(ty));
+  });
 }
 
 // TODO(cir): this can be shared with LLVM codegen.
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h
index 95a7ac0648bb7..00c79526d28c8 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.h
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.h
@@ -148,6 +148,23 @@ class CIRGenModule : public CIRGenTypeCache {
   cir::GlobalOp getOrCreateCIRGlobal(const VarDecl *d, mlir::Type ty,
                                      ForDefinition_t isForDefinition);
 
+  /// TODO(cir): once we have cir.module, add this as a convenience method
+  /// there instead of here.
+  ///
+  /// Look up the specified global in the module symbol table.
+  ///   1. If it does not exist, add a declaration of the global and return it.
+  ///   2. Else, the global exists but has the wrong type: return the global
+  ///      with a constantexpr cast to the right type.
+  ///   3. Finally, if the existing global is the correct declaration, return
+  ///      the existing global.
+  cir::GlobalOp
+  getOrInsertGlobal(mlir::Location loc, llvm::StringRef name, mlir::Type ty,
+                    llvm::function_ref<cir::GlobalOp()> createGlobalCallback);
+
+  // Overload to construct a global variable using its constructor's defaults.
+  cir::GlobalOp getOrInsertGlobal(mlir::Location loc, llvm::StringRef name,
+                                  mlir::Type ty);
+
   static cir::GlobalOp createGlobalOp(CIRGenModule &cgm, mlir::Location loc,
                                       llvm::StringRef name, mlir::Type t,
                                       bool isConstant = false,
@@ -250,6 +267,18 @@ class CIRGenModule : public CIRGenTypeCache {
   mlir::Attribute getAddrOfRTTIDescriptor(mlir::Location loc, QualType ty,
                                           bool forEH = false);
 
+  static mlir::SymbolTable::Visibility getCIRVisibility(Visibility v) {
+    switch (v) {
+    case DefaultVisibility:
+      return mlir::SymbolTable::Visibility::Public;
+    case HiddenVisibility:
+      return mlir::SymbolTable::Visibility::Private;
+    case ProtectedVisibility:
+      llvm_unreachable("NYI");
+    }
+    llvm_unreachable("unknown visibility!");
+  }
+
   /// Return a constant array for the given string.
   mlir::Attribute getConstantArrayFromStringLiteral(const StringLiteral *e);
 
diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp
index af8f5ae2cc0a5..bc001cdd8bfbe 100644
--- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp
@@ -47,6 +47,49 @@ cir::RecordType CIRGenVTables::getVTableType(const VTableLayout &layout) {
   return cgm.getBuilder().getAnonRecordTy(tys, /*incomplete=*/false);
 }
 
+/// At this point in the translation unit, does it appear that we can
+/// rely on the vtable being defined elsewhere in the program?
+///
+/// The response is really only definitive when called at the end of
+/// the translation unit.
+///
+/// The only semantic restriction here is that the object file should
+/// not contain a vtable definition when that vtable is defined
+/// strongly elsewhere.  Otherwise, we'd just like to avoid emitting
+/// vtables when unnecessary.
+/// TODO(cir): this should be merged into common AST helper for codegen.
+bool CIRGenVTables::isVTableExternal(const CXXRecordDecl *rd) {
+  assert(rd->isDynamicClass() && "Non-dynamic classes have no VTable.");
+
+  // We always synthesize vtables if they are needed in the MS ABI. MSVC doesn't
+  // emit them even if there is an explicit template instantiation.
+  if (cgm.getTarget().getCXXABI().isMicrosoft())
+    return false;
+
+  // If we have an explicit instantiation declaration (and not a
+  // definition), the vtable is defined elsewhere.
+  TemplateSpecializationKind tsk = rd->getTemplateSpecializationKind();
+  if (tsk == TSK_ExplicitInstantiationDeclaration)
+    return true;
+
+  // Otherwise, if the class is an instantiated template, the
+  // vtable must be defined here.
+  if (tsk == TSK_ImplicitInstantiation ||
+      tsk == TSK_ExplicitInstantiationDefinition)
+    return false;
+
+  // Otherwise, if the class doesn't have a key function (possibly
+  // anymore), the vtable must be defined here.
+  const CXXMethodDecl *keyFunction =
+      cgm.getASTContext().getCurrentKeyFunction(rd);
+  if (!keyFunction)
+    return false;
+
+  // Otherwise, if we don't have a definition of the key function, the
+  // vtable must be defined somewhere else.
+  return !keyFunction->hasBody();
+}
+
 /// This is a callback from Sema to tell us that a particular vtable is
 /// required to be emitted in this translation unit.
 ///
diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.h b/clang/lib/CIR/CodeGen/CIRGenVTables.h
index e19242c651034..9c425ab43b3d9 100644
--- a/clang/lib/CIR/CodeGen/CIRGenVTables.h
+++ b/clang/lib/CIR/CodeGen/CIRGenVTables.h
@@ -100,6 +100,8 @@ class CIRGenVTables {
   /// is enabled) and the VTT (if the class has virtual bases).
   void generateClassData(const CXXRecordDecl *rd);
 
+  bool isVTableExternal(const clang::CXXRecordDecl *rd);
+
   /// Returns the type of a vtable with the given layout. Normally a struct of
   /// arrays of pointers, with one struct element for each vtable in the vtable
   /// group.
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 1865698838134..767bef9cd11ab 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -222,8 +222,9 @@ class CIRAttrToValue {
     return llvm::TypeSwitch<mlir::Attribute, mlir::Value>(attr)
         .Case<cir::IntAttr, cir::FPAttr, cir::ConstComplexAttr,
               cir::ConstArrayAttr, cir::ConstRecordAttr, cir::ConstVectorAttr,
-              cir::ConstPtrAttr, cir::GlobalViewAttr, cir::VTableAttr,
-              cir::ZeroAttr>([&](auto attrT) { return visitCirAttr(attrT); })
+              cir::ConstPtrAttr, cir::GlobalViewAttr, cir::TypeInfoAttr,
+              cir::VTableAttr, cir::ZeroAttr>(
+            [&](auto attrT) { return visitCirAttr(attrT); })
         .Default([&](auto attrT) { return mlir::Value(); });
   }
 
@@ -1694,7 +1695,7 @@ CIRToLLVMGlobalOpLowering::matchAndRewriteRegionInitializedGlobal(
   // TODO: Generalize this handling when more types are needed here.
   assert((isa<cir::ConstArrayAttr, cir::ConstRecordAttr, cir::ConstVectorAttr,
               cir::ConstPtrAttr, cir::ConstComplexAttr, cir::GlobalViewAttr,
-              cir::VTableAttr, cir::ZeroAttr>(init)));
+              cir::TypeInfoAttr, cir::VTableAttr, cir::ZeroAttr>(init)));
 
   // TODO(cir): once LLVM's dialect has proper equivalent attributes this
   // should be updated. For now, we use a custom op to initialize globals
@@ -1749,7 +1750,8 @@ mlir::LogicalResult CIRToLLVMGlobalOpLowering::matchAndRewrite(
     } else if (mlir::isa<cir::ConstArrayAttr, cir::ConstVectorAttr,
                          cir::ConstRecordAttr, cir::ConstPtrAttr,
                          cir::ConstComplexAttr, cir::GlobalViewAttr,
-                         cir::VTableAttr, cir::ZeroAttr>(init.value())) {
+                         cir::TypeInfoAttr, cir::VTableAttr, cir::ZeroAttr>(
+                   init.value())) {
       // TODO(cir): once LLVM's dialect has proper equivalent attributes this
       // should be updated. For now, we use a custom op to initialize globals
       // to the appropriate value.
diff --git a/clang/test/CIR/CodeGen/vtable-rtti.cpp b/clang/test/CIR/CodeGen/vtable-rtti.cpp
new file mode 100644
index 0000000000000..546ebbdec84e4
--- /dev/null
+++ b/clang/test/CIR/CodeGen/vtable-rtti.cpp
@@ -0,0 +1,503 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --check-prefix=LLVM --input-file=%t-cir.ll  %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o %t.ll
+// RUN: FileCheck --check-prefix=OGCG --input-file=%t.ll  %s
+
+class A {
+public:
+  int a;
+  virtual void v();
+};
+
+class B : public virtual A {
+public:
+  int b;
+  virtual void w();
+};
+
+class C : public virtual A {
+public:
+  long c;
+  virtual void x();
+};
+
+class D : public B, public C {
+public:
+  long d;
+  D();
+  virtual void y();
+};
+
+// This is just here to force the record types to be emitted.
+void f(D *d) {}
+
+// Trigger vtable and VTT emission for D.
+void D::y() {}
+
+// CIR: !rec_A2Ebase = !cir.record<struct "A.base" packed {!cir.vptr, !s32i}>
+// CIR: !rec_B2Ebase = !cir.record<struct "B.base" packed {!cir.vptr, !s32i}>
+// CIR: !rec_C2Ebase = !cir.record<struct "C.base" {!cir.vptr, !s64i}>
+// CIR: !rec_A = !cir.record<class "A" packed padded {!cir.vptr, !s32i, !cir.array<!u8i x 4>}>
+// CIR: !rec_B = !cir.record<class "B" packed padded {!cir.vptr, !s32i, !cir.array<!u8i x 4>, !rec_A2Ebase, !cir.array<!u8i x 4>}>
+// CIR: !rec_C = !cir.record<class "C" {!cir.vptr, !s64i, !rec_A2Ebase}>
+// CIR: !rec_D = !cir.record<class "D" {!rec_B2Ebase, !rec_C2Ebase, !s64i, !rec_A2Ebase}>
+
+// CIR: !rec_anon_struct = !cir.record<struct  {!cir.ptr<!u8i>, !cir.ptr<!u8i>, !u32i, !u32i, !cir.ptr<!u8i>, !s64i, !cir.ptr<!u8i>, !s64i}>
+// CIR: !rec_anon_struct1 = !cir.record<struct  {!cir.array<!cir.ptr<!u8i> x 5>, !cir.array<!cir.ptr<!u8i> x 4>, !cir.array<!cir.ptr<!u8i> x 4>}>
+// CIR: !rec_anon_struct2 = !cir.record<struct  {!cir.array<!cir.ptr<!u8i> x 4>, !cir.array<!cir.ptr<!u8i> x 4>}>
+
+// Vtable for D
+
+// CIR:      cir.global{{.*}} @_ZTV1D = #cir.vtable<{
+// CIR-SAME:   #cir.const_array<[#cir.ptr<40 : i64> : !cir.ptr<!u8i>,
+// CIR-SAME:     #cir.ptr<null> : !cir.ptr<!u8i>, 
+// CIR-SAME:     #cir.global_view<@_ZTI1D> : !cir.ptr<!u8i>, 
+// CIR-SAME:     #cir.global_view<@_ZN1B1wEv> : !cir.ptr<!u8i>,
+// CIR-SAME:     #cir.global_view<@_ZN1D1yEv> : !cir.ptr<!u8i>
+// CIR-SAME:   ]> : !cir.array<!cir.ptr<!u8i> x 5>, 
+// CIR-SAME:   #cir.const_array<[#cir.ptr<24 : i64> : !cir.ptr<!u8i>,
+// CIR-SAME:     #cir.ptr<-16 : i64> : !cir.ptr<!u8i>,
+// CIR-SAME:     #cir.global_view<@_ZTI1D> : !cir.ptr<!u8i>, 
+// CIR-SAME:     #cir.global_view<@_ZN1C1xEv> : !cir.ptr<!u8i>
+// CIR-SAME:   ]> : !cir.array<!cir.ptr<!u8i> x 4>,
+// CIR-SAME:   #cir.const_array<[#cir.ptr<null> : !cir.ptr<!u8i>, 
+// CIR-SAME:     #cir.ptr<-40 : i64> : !cir.ptr<!u8i>, 
+// CIR-SAME:     #cir.global_view<@_ZTI1D> : !cir.ptr<!u8i>, 
+// CIR-SAME:     #cir.global_view<@_ZN1A1vEv> : !cir.ptr<!u8i>
+// CIR-SAME:   ]> : !cir.array<!cir.ptr<!u8i> x 4>
+// CIR-SAME: }> : !rec_anon_struct1
+
+// LLVM:     @_ZTV1D = global { 
+// LLVM-SAME:   [5 x ptr], [4 x ptr], [4 x ptr] } 
+// LLVM-SAME:   { [5 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr @_ZTI1D, ptr @_ZN1B1wEv, ptr @_ZN1D1yEv],
+// LLVM-SAME:   [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr inttoptr (i64 -16 to ptr), ptr @_ZTI1D, ptr @_ZN1C1xEv],
+// LLVM-SAME:   [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr @_ZTI1D, ptr @_ZN1A1vEv]
+// LLVM-SAME:  }, align 8
+
+// OGCG:     @_ZTV1D = unnamed_addr constant { 
+// OGCG-SAME:   [5 x ptr], [4 x ptr], [4 x ptr] } 
+// OGCG-SAME:   { [5 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr @_ZTI1D, ptr @_ZN1B1wEv, ptr @_ZN1D1yEv],
+// OGCG-SAME:   [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr inttoptr (i64 -16 to ptr), ptr @_ZTI1D, ptr @_ZN1C1xEv],
+// OGCG-SAME:   [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr @_ZTI1D, ptr @_ZN1A1vEv]
+// OGCG-SAME:  }, align 8
+
+// VTT for D
+
+// CIR:      cir.global{{.*}} @_ZTT1D = #cir.const_array<[
+// CIR-SAME:   #cir.global_view<@_ZTV1D, [0 : i32, 3 : i32]> : !cir.ptr<!u8i>, 
+// CIR-SAME:   #cir.global_view<@_ZTC1D0_1B, [0 : i32, 3 : i32]> : !cir.ptr<!u8i>,
+// CIR-SAME:   #cir.global_view<@_ZTC1D0_1B, [1 : i32, 3 : i32]> : !cir.ptr<!u8i>,
+// CIR-SAME:   #cir.global_view<@_ZTC1D16_1C, [0 : i32, 3 : i32]> : !cir.ptr<!u8i>,
+// CIR-SAME:   #cir.global_view<@_ZTC1D16_1C, [1 : i32, 3 : i32]> : !cir.ptr<!u8i>,
+// CIR-SAME:   #cir.global_view<@_ZTV1D, [2 : i32, 3 : i32]> : !cir.ptr<!u8i>, 
+// CIR-SAME:   #cir.global_view<@_ZTV1D, [1 : i32, 3 : i32]> : !cir.ptr<!u8i>
+// CIR-SAME: ]> : !cir.array<!cir.ptr<!u8i> x 7>
+
+// LLVM:      @_ZTT1D = global [7 x ptr] [
+// LLVM-SAME:   ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 24), 
+// LLVM-SAME:   ptr getelementptr inbounds nuw (i8, ptr @_ZTC1D0_1B, i64 24), 
+// LLVM-SAME:   ptr getelementptr inbounds nuw (i8, ptr @_ZTC1D0_1B, i64 56), 
+// LLVM-SAME:   ptr getelementptr inbounds nuw (i8, ptr @_ZTC1D16_1C, i64 24), 
+// LLVM-SAME:   ptr getelementptr inbounds nuw (i8, ptr @_ZTC1D16_1C, i64 56), 
+// LLVM-SAME:   ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 96), 
+// LLVM-SAME:   ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 64)
+// LLVM-SAME: ], align 8
+
+// OGCG:      @_ZTT1D = unnamed_addr constant [7 x ptr] [
+// OGCG-SAME:   ptr getelementptr inbounds inrange(-24, 16) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 0, i32 3),
+// OGCG-SAME:   ptr getelementptr inbounds inrange(-24, 8) ({ [4 x ptr], [4 x ptr] }, ptr @_ZTC1D0_1B, i32 0, i32 0, i32 3),
+// OGCG-SAME:   ptr getelementptr inbounds inrange(-24, 8) ({ [4 x ptr], [4 x ptr] }, ptr @_ZTC1D0_1B, i32 0, i32 1, i32 3),
+// OGCG-SAME:   ptr getelementptr inbounds inrange(-24, 8) ({ [4 x ptr], [4 x ptr] }, ptr @_ZTC1D16_1C, i32 0, i32 0, i32 3),
+// OGCG-SAME:   ptr getelementptr inbounds inrange(-24, 8) ({ [4 x ptr], [4 x ptr] }, ptr @_ZTC1D16_1C, i32 0, i32 1, i32 3),
+// OGCG-SAME:   ptr getelementptr inbounds inrange(-24, 8) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 2, i32 3),
+// OGCG-SAME:   ptr getelementptr inbounds inrange(-24, 8) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 1, i32 3)
+// OGCG-SAME: ], align 8
+
+// Construction vtable for B-in-D
+
+// CIR:      cir.global{{.*}} @_ZTC1D0_1B = #cir.vtable<{
+// CIR-SAME:   #cir.const_array<[
+// CIR-SAME:     #cir.ptr<40 : i64> : !cir.ptr<!u8i>, 
+// CIR-SAME:     #cir.ptr<null> : !cir.ptr<!u8i>, 
+// CIR-SAME:     #cir.global_view<@_ZTI1B> : !cir.ptr<!u8i>, 
+// CIR-SAME:     #cir.global_view<@_ZN1B1wEv> : !cir.ptr<!u8i>
+// CIR-SAME:   ]> : !cir.array<!cir.ptr<!u8i> x 4>,
+// CIR-SAME:   #cir.const_array<[
+// CIR-SAME:     #cir.ptr<null> : !cir.ptr<!u8i>, 
+// CIR-SAME:     #cir.ptr<-40 : i64> : !cir.ptr<!u8i>, 
+// CIR-SAME:     #cir.global_view<@_ZTI1B> : !cir.ptr<!u8i>, 
+// CIR-SAME:     #cir.global_view<@_ZN1A1vEv> : !cir.ptr<!u8i>
+// CIR-SAME:   ]> : !cir.array<!cir.ptr<!u8i> x 4>}> : !rec_anon_struct2
+
+// LLVM:      @_ZTC1D0_1B = global { [4 x ptr], [4 x ptr] } {
+// LLVM-SAME:   [4 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr @_ZTI1B, ptr @_ZN1B1wEv],
+// LLVM-SAME:   [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr @_ZTI1B, ptr @_ZN1A1vEv]
+// LLVM-SAME: }, align 8
+
+// OGCG:      @_ZTC1D0_1B = unnamed_addr constant { [4 x ptr], [4 x ptr] } {
+// OGCG-SAME:   [4 x ptr] [ptr inttoptr (i64 40 to ptr), ptr null, ptr @_ZTI1B, ptr @_ZN1B1wEv],
+// OGCG-SAME:   [4 x ptr] [ptr null, ptr inttoptr (i64 -40 to ptr), ptr @_ZTI1B, ptr @_ZN1A1vEv]
+// OGCG-SAME: }, align 8
+
+// CIR:  cir.global{{.*}} @_ZTI1B : !cir.ptr<!u8i>
+
+// LLVM: @_ZTI1B = external global ptr
+
+// OGCG: @_ZTI1B = external constant ptr
+
+// Construction vtable for C-in-D
+
+// CIR:      cir.global{{.*}} @_ZTC1D16_1C = #cir.vtable<{
+// CIR-SAME:   #cir.const_array<[
+// CIR-SAME:     #cir.ptr<24 : i64> : !cir.ptr<!u8i>, 
+// CIR-SAME:     #cir.ptr<null> : !cir.ptr<!u8i>, 
+// CIR-SAME:     #cir.global_view<@_ZTI1C> : !cir.ptr<!u8i>, 
+// CIR-SAME:     #cir.global_view<@_ZN1C1xEv> : !cir.ptr<!u8i>
+// CIR-SAME:   ]> : !cir.array<!cir.ptr<!u8i> x 4>, 
+// CIR-SAME:   #cir.const_array<[
+// CIR-SAME:     #cir.ptr<null> : !cir.ptr<!u8i>, 
+// CIR-SAME:     #cir.ptr<-24 : i64> : !cir.ptr<!u8i>, 
+// CIR-SAME:     #cir.global_view<@_ZTI1C> : !cir.ptr<!u8i>, 
+// CIR-SAME:     #cir.global_view<@_ZN1A1vEv> : !cir.ptr<!u8i>
+// CIR-SAME:   ]> : !cir.array<!cir.ptr<!u8i> x 4>}> : !rec_anon_struct2
+
+// LLVM:      @_ZTC1D16_1C = global { [4 x ptr], [4 x ptr] } {
+// LLVM-SAME:   [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr null, ptr @_ZTI1C, ptr @_ZN1C1xEv],
+// LLVM-SAME:   [4 x ptr] [ptr null, ptr inttoptr (i64 -24 to ptr), ptr @_ZTI1C, ptr @_ZN1A1vEv]
+// LLVM-SAME: }, align 8
+
+// OGCG:      @_ZTC1D16_1C = unnamed_addr constant { [4 x ptr], [4 x ptr] } {
+// OGCG-SAME:   [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr null, ptr @_ZTI1C, ptr @_ZN1C1xEv],
+// OGCG-SAME:   [4 x ptr] [ptr null, ptr inttoptr (i64 -24 to ptr), ptr @_ZTI1C, ptr @_ZN1A1vEv]
+// OGCG-SAME: }, align 8
+
+// CIR: cir.global{{.*}} @_ZTI1C : !cir.ptr<!u8i>
+
+// LLVM: @_ZTI1C = external global ptr
+
+// OGCG: @_ZTI1C = external constant ptr
+
+// RTTI class type info for D
+
+// CIR:  cir.global{{.*}} @_ZTVN10__cxxabiv121__vmi_class_type_infoE : !cir.ptr<!cir.ptr<!u8i>>
+
+// CIR:  cir.global{{.*}} @_ZTS1D = #cir.const_array<"1D" : !cir.array<!s8i x 2>> : !cir.array<!s8i x 2>
+
+// CIR:      cir.global{{.*}} @_ZTI1D = #cir.typeinfo<{
+// CIR-SAME:   #cir.global_view<@_ZTVN10__cxxabiv121__vmi_class_type_infoE, [2 : i32]> : !cir.ptr<!u8i>,
+// CIR-SAME:   #cir.global_view<@_ZTS1D> : !cir.ptr<!u8i>, 
+// CIR-SAME:   #cir.int<2> : !u32i, #cir.int<2> : !u32i, 
+// CIR-SAME:   #cir.global_view<@_ZTI1B> : !cir.ptr<!u8i>, 
+// CIR-SAME:   #cir.int<2> : !s64i, 
+// CIR-SAME:   #cir.global_view<@_ZTI1C> : !cir.ptr<!u8i>, 
+// CIR-SAME:   #cir.int<4098> : !s64i}> : !rec_anon_struct
+
+// CIR: cir.global{{.*}} @_ZTV1A : !rec_anon_struct3 
+
+// LLVM: @_ZTVN10__cxxabiv121__vmi_class_type_infoE = external global ptr
+// LLVM: @_ZTS1D = global [2 x i8] c"1D", align 1
+
+// LLVM:      @_ZTI1D = constant { ptr, ptr, i32, i32, ptr, i64, ptr, i64 } {
+// LLVM-SAME:   ptr getelementptr (i8, ptr @_ZTVN10__cxxabiv121__vmi_class_type_infoE, i64 16),
+// LLVM-SAME:   ptr @_ZTS1D, i32 2, i32 2, ptr @_ZTI1B, i64 2, ptr @_ZTI1C, i64 4098 }
+
+// OGCG:      @_ZTI1D = constant { ptr, ptr, i32, i32, ptr, i64, ptr, i64 } {
+// OGCG-SAME:   ptr getelementptr inbounds (ptr, ptr @_ZTVN10__cxxabiv121__vmi_class_type_infoE, i64 2),
+// OGCG-SAME:   ptr @_ZTS1D, i32 2, i32 2, ptr @_ZTI1B, i64 2, ptr @_ZTI1C, i64 4098 }, align 8
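+
+// Decoding @_ZTI1D (__vmi_class_type_info): the leading pointer is the
+// address point (two pointers, i.e. 16 bytes, in) of the
+// __cxxabiv1::__vmi_class_type_info vtable; the two i32 fields are the flags
+// (2 = diamond-shaped) and the base count (2). Each base is described by its
+// type_info pointer plus an __offset_flags word: 2 for B is offset 0 with the
+// public mask set, and 4098 for C is (16 << 8) | 0x2, i.e. C at offset 16,
+// public. This matches the diamond used by this test: B and C virtually
+// derive from A, and D derives from B and C.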
+
+// OGCG: @_ZTVN10__cxxabiv121__vmi_class_type_infoE = external global [0 x ptr]
+// OGCG: @_ZTS1D = constant [3 x i8] c"1D\00", align 1
+// OGCG: @_ZTV1A = external unnamed_addr constant { [3 x ptr] }, align 8
+
+D::D() {}
+
+// In CIR, this gets emitted after the B and C constructors. See below.
+// Base (C2) constructor for D
+
+// OGCG: define {{.*}} void @_ZN1DC2Ev(ptr {{.*}} %[[THIS_ARG:.*]], ptr {{.*}} %[[VTT_ARG:.*]])
+// OGCG:   %[[THIS_ADDR:.*]] = alloca ptr
+// OGCG:   %[[VTT_ADDR:.*]] = alloca ptr
+// OGCG:   store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// OGCG:   store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]]
+// OGCG:   %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// OGCG:   %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]]
+// OGCG:   %[[B_VTT:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 1
+// OGCG:   call void @_ZN1BC2Ev(ptr {{.*}} %[[THIS]], ptr {{.*}} %[[B_VTT]])
+// OGCG:   %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16
+// OGCG:   %[[C_VTT:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 3
+// OGCG:   call void @_ZN1CC2Ev(ptr {{.*}} %[[C_ADDR]], ptr {{.*}} %[[C_VTT]])
+// OGCG:   %[[VPTR:.*]] = load ptr, ptr %[[VTT]]
+// OGCG:   store ptr %[[VPTR]], ptr %[[THIS]]
+// OGCG:   %[[D_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 5
+// OGCG:   %[[D_VPTR:.*]] = load ptr, ptr %[[D_VPTR_ADDR]]
+// OGCG:   %[[D_VPTR_ADDR2:.*]] = load ptr, ptr %[[THIS]]
+// OGCG:   %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[D_VPTR_ADDR2]], i64 -24
+// OGCG:   %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]]
+// OGCG:   %[[BASE_PTR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]]
+// OGCG:   store ptr %[[D_VPTR]], ptr %[[BASE_PTR]]
+// OGCG:   %[[C_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 6
+// OGCG:   %[[C_VPTR:.*]] = load ptr, ptr %[[C_VPTR_ADDR]]
+// OGCG:   %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16
+// OGCG:   store ptr %[[C_VPTR]], ptr %[[C_ADDR]]
+
+// Base (C2) constructor for B
+
+// CIR:      cir.func {{.*}} @_ZN1BC2Ev
+// CIR-SAME:                      %[[THIS_ARG:.*]]: !cir.ptr<!rec_B>
+// CIR-SAME:                      %[[VTT_ARG:.*]]: !cir.ptr<!cir.ptr<!void>>
+// CIR:        %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init]
+// CIR:        %[[VTT_ADDR:.*]] = cir.alloca {{.*}} ["vtt", init]
+// CIR:        cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
+// CIR:        cir.store %[[VTT_ARG]], %[[VTT_ADDR]]
+// CIR:        %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
+// CIR:        %[[VTT:.*]] = cir.load{{.*}} %[[VTT_ADDR]]
+// CIR:        %[[VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 0 -> !cir.ptr<!cir.ptr<!void>>
+// CIR:        %[[VPTR_ADDR:.*]] = cir.cast(bitcast, %[[VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+// CIR:        %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_ADDR]]
+// CIR:        %[[B_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]]
+// CIR:        cir.store{{.*}} %[[VPTR]], %[[B_VPTR_ADDR]]
+// CIR:        %[[B_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 1 -> !cir.ptr<!cir.ptr<!void>>
+// CIR:        %[[B_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[B_VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+// CIR:        %[[B_VPTR:.*]] = cir.load{{.*}} %[[B_VPTR_ADDR]]
+// CIR:        %[[B_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]]
+// CIR:        %[[VPTR:.*]] = cir.load{{.*}} %[[B_VPTR_ADDR]]
+// CIR:        %[[VPTR_ADDR2:.*]] = cir.cast(bitcast, %[[VPTR]] : !cir.vptr), !cir.ptr<!u8i>
+// CIR:        %[[CONST_24:.*]] = cir.const #cir.int<-24>
+// CIR:        %[[BASE_OFFSET_ADDR:.*]] = cir.ptr_stride(%[[VPTR_ADDR2]] : !cir.ptr<!u8i>, %[[CONST_24]] : !s64i), !cir.ptr<!u8i>
+// CIR:        %[[BASE_OFFSET_PTR:.*]] = cir.cast(bitcast, %[[BASE_OFFSET_ADDR]] : !cir.ptr<!u8i>), !cir.ptr<!s64i>
+// CIR:        %[[BASE_OFFSET:.*]] = cir.load{{.*}} %[[BASE_OFFSET_PTR]] : !cir.ptr<!s64i>, !s64i
+// CIR:        %[[THIS_PTR:.*]] = cir.cast(bitcast, %[[THIS]] : !cir.ptr<!rec_B>), !cir.ptr<!u8i>
+// CIR:        %[[BASE_PTR:.*]] = cir.ptr_stride(%[[THIS_PTR]] : !cir.ptr<!u8i>, %[[BASE_OFFSET]] : !s64i), !cir.ptr<!u8i>
+// CIR:        %[[BASE_CAST:.*]] = cir.cast(bitcast, %[[BASE_PTR]] : !cir.ptr<!u8i>), !cir.ptr<!rec_B>
+// CIR:        %[[BASE_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[BASE_CAST]]
+// CIR:        cir.store{{.*}} %[[B_VPTR]], %[[BASE_VPTR_ADDR]]
+
+// LLVM: define {{.*}} void @_ZN1BC2Ev(ptr %[[THIS_ARG:.*]], ptr %[[VTT_ARG:.*]])
+// LLVM:   %[[THIS_ADDR:.*]] = alloca ptr
+// LLVM:   %[[VTT_ADDR:.*]] = alloca ptr
+// LLVM:   store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// LLVM:   store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]]
+// LLVM:   %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// LLVM:   %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]]
+// LLVM:   %[[VPTR:.*]] = load ptr, ptr %[[VTT]]
+// LLVM:   store ptr %[[VPTR]], ptr %[[THIS]]
+// LLVM:   %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 1
+// LLVM:   %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]]
+// LLVM:   %[[VPTR:.*]] = load ptr, ptr %[[THIS]]
+// LLVM:   %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24
+// LLVM:   %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]]
+// LLVM:   %[[BASE_PTR:.*]] = getelementptr i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]]
+// LLVM:   store ptr %[[B_VPTR]], ptr %[[BASE_PTR]]
+
+// OGCG: define {{.*}} void @_ZN1BC2Ev(ptr {{.*}} %[[THIS_ARG:.*]], ptr {{.*}} %[[VTT_ARG:.*]])
+// OGCG:   %[[THIS_ADDR:.*]] = alloca ptr
+// OGCG:   %[[VTT_ADDR:.*]] = alloca ptr
+// OGCG:   store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// OGCG:   store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]]
+// OGCG:   %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// OGCG:   %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]]
+// OGCG:   %[[VPTR:.*]] = load ptr, ptr %[[VTT]]
+// OGCG:   store ptr %[[VPTR]], ptr %[[THIS]]
+// OGCG:   %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 1
+// OGCG:   %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]]
+// OGCG:   %[[VPTR:.*]] = load ptr, ptr %[[THIS]]
+// OGCG:   %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24
+// OGCG:   %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]]
+// OGCG:   %[[BASE_PTR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]]
+// OGCG:   store ptr %[[B_VPTR]], ptr %[[BASE_PTR]]
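+
+// The pattern above is shared by the base (C2) constructors: the vptr is
+// loaded from the VTT slot and stored into the object, then the virtual base
+// A is located by reading the vbase offset stored 24 bytes before the
+// construction-vtable address point (hence the -24), and A's vptr is stored
+// through that offset.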
+
+// Base (C2) constructor for C
+
+// CIR:      cir.func {{.*}} @_ZN1CC2Ev
+// CIR-SAME:                      %[[THIS_ARG:.*]]: !cir.ptr<!rec_C>
+// CIR-SAME:                      %[[VTT_ARG:.*]]: !cir.ptr<!cir.ptr<!void>>
+// CIR:        %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init]
+// CIR:        %[[VTT_ADDR:.*]] = cir.alloca {{.*}} ["vtt", init]
+// CIR:        cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
+// CIR:        cir.store %[[VTT_ARG]], %[[VTT_ADDR]]
+// CIR:        %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
+// CIR:        %[[VTT:.*]] = cir.load{{.*}} %[[VTT_ADDR]]
+// CIR:        %[[VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 0 -> !cir.ptr<!cir.ptr<!void>>
+// CIR:        %[[VPTR_ADDR:.*]] = cir.cast(bitcast, %[[VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+// CIR:        %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_ADDR]]
+// CIR:        %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]]
+// CIR:        cir.store{{.*}} %[[VPTR]], %[[C_VPTR_ADDR]]
+// CIR:        %[[C_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 1 -> !cir.ptr<!cir.ptr<!void>>
+// CIR:        %[[C_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[C_VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+// CIR:        %[[C_VPTR:.*]] = cir.load{{.*}} %[[C_VPTR_ADDR]]
+// CIR:        %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]]
+// CIR:        %[[VPTR:.*]] = cir.load{{.*}} %[[C_VPTR_ADDR]]
+// CIR:        %[[VPTR_ADDR2:.*]] = cir.cast(bitcast, %[[VPTR]] : !cir.vptr), !cir.ptr<!u8i>
+// CIR:        %[[CONST_24:.*]] = cir.const #cir.int<-24>
+// CIR:        %[[BASE_OFFSET_ADDR:.*]] = cir.ptr_stride(%[[VPTR_ADDR2]] : !cir.ptr<!u8i>, %[[CONST_24]] : !s64i), !cir.ptr<!u8i>
+// CIR:        %[[BASE_OFFSET_PTR:.*]] = cir.cast(bitcast, %[[BASE_OFFSET_ADDR]] : !cir.ptr<!u8i>), !cir.ptr<!s64i>
+// CIR:        %[[BASE_OFFSET:.*]] = cir.load{{.*}} %[[BASE_OFFSET_PTR]] : !cir.ptr<!s64i>, !s64i
+// CIR:        %[[THIS_PTR:.*]] = cir.cast(bitcast, %[[THIS]] : !cir.ptr<!rec_C>), !cir.ptr<!u8i>
+// CIR:        %[[BASE_PTR:.*]] = cir.ptr_stride(%[[THIS_PTR]] : !cir.ptr<!u8i>, %[[BASE_OFFSET]] : !s64i), !cir.ptr<!u8i>
+// CIR:        %[[BASE_CAST:.*]] = cir.cast(bitcast, %[[BASE_PTR]] : !cir.ptr<!u8i>), !cir.ptr<!rec_C>
+// CIR:        %[[BASE_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[BASE_CAST]]
+// CIR:        cir.store{{.*}} %[[C_VPTR]], %[[BASE_VPTR_ADDR]]
+
+// LLVM: define {{.*}} void @_ZN1CC2Ev(ptr %[[THIS_ARG:.*]], ptr %[[VTT_ARG:.*]])
+// LLVM:   %[[THIS_ADDR:.*]] = alloca ptr
+// LLVM:   %[[VTT_ADDR:.*]] = alloca ptr
+// LLVM:   store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// LLVM:   store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]]
+// LLVM:   %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// LLVM:   %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]]
+// LLVM:   %[[VPTR:.*]] = load ptr, ptr %[[VTT]]
+// LLVM:   store ptr %[[VPTR]], ptr %[[THIS]]
+// LLVM:   %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 1
+// LLVM:   %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]]
+// LLVM:   %[[VPTR:.*]] = load ptr, ptr %[[THIS]]
+// LLVM:   %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24
+// LLVM:   %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]]
+// LLVM:   %[[BASE_PTR:.*]] = getelementptr i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]]
+// LLVM:   store ptr %[[B_VPTR]], ptr %[[BASE_PTR]]
+
+// OGCG: define {{.*}} void @_ZN1CC2Ev(ptr {{.*}} %[[THIS_ARG:.*]], ptr {{.*}} %[[VTT_ARG:.*]])
+// OGCG:   %[[THIS_ADDR:.*]] = alloca ptr
+// OGCG:   %[[VTT_ADDR:.*]] = alloca ptr
+// OGCG:   store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// OGCG:   store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]]
+// OGCG:   %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// OGCG:   %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]]
+// OGCG:   %[[VPTR:.*]] = load ptr, ptr %[[VTT]]
+// OGCG:   store ptr %[[VPTR]], ptr %[[THIS]]
+// OGCG:   %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 1
+// OGCG:   %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]]
+// OGCG:   %[[VPTR:.*]] = load ptr, ptr %[[THIS]]
+// OGCG:   %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24
+// OGCG:   %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]]
+// OGCG:   %[[BASE_PTR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]]
+// OGCG:   store ptr %[[B_VPTR]], ptr %[[BASE_PTR]]
+
+// Base (C2) constructor for D
+
+// CIR:      cir.func {{.*}} @_ZN1DC2Ev
+// CIR-SAME:                      %[[THIS_ARG:.*]]: !cir.ptr<!rec_D>
+// CIR-SAME:                      %[[VTT_ARG:.*]]: !cir.ptr<!cir.ptr<!void>>
+// CIR:        %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init]
+// CIR:        %[[VTT_ADDR:.*]] = cir.alloca {{.*}} ["vtt", init]
+// CIR:        cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
+// CIR:        cir.store %[[VTT_ARG]], %[[VTT_ADDR]]
+// CIR:        %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
+// CIR:        %[[VTT:.*]] = cir.load{{.*}} %[[VTT_ADDR]]
+// CIR:        %[[B_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_D> nonnull [0] -> !cir.ptr<!rec_B>
+// CIR:        %[[B_VTT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 1 -> !cir.ptr<!cir.ptr<!void>>
+// CIR:        cir.call @_ZN1BC2Ev(%[[B_ADDR]], %[[B_VTT]]) nothrow : (!cir.ptr<!rec_B>, !cir.ptr<!cir.ptr<!void>>) -> ()
+// CIR:        %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_D> nonnull [16] -> !cir.ptr<!rec_C>
+// CIR:        %[[C_VTT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 3 -> !cir.ptr<!cir.ptr<!void>>
+// CIR:        cir.call @_ZN1CC2Ev(%[[C_ADDR]], %[[C_VTT]]) nothrow : (!cir.ptr<!rec_C>, !cir.ptr<!cir.ptr<!void>>) -> ()
+// CIR:        %[[D_VTT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 0 -> !cir.ptr<!cir.ptr<!void>>
+// CIR:        %[[VPTR_ADDR:.*]] = cir.cast(bitcast, %[[D_VTT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+// CIR:        %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_ADDR]] : !cir.ptr<!cir.vptr>, !cir.vptr
+// CIR:        %[[D_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]]
+// CIR:        cir.store{{.*}} %[[VPTR]], %[[D_VPTR_ADDR]]
+// CIR:        %[[D_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 5 -> !cir.ptr<!cir.ptr<!void>>
+// CIR:        %[[D_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[D_VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+// CIR:        %[[D_VPTR:.*]] = cir.load{{.*}} %[[D_VPTR_ADDR]] : !cir.ptr<!cir.vptr>, !cir.vptr
+// CIR:        %[[D_VPTR_ADDR2:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr<!rec_D> -> !cir.ptr<!cir.vptr>
+// CIR:        %[[VPTR2:.*]] = cir.load{{.*}} %[[D_VPTR_ADDR2]] : !cir.ptr<!cir.vptr>, !cir.vptr
+// CIR:        %[[VPTR_ADDR2:.*]] = cir.cast(bitcast, %[[VPTR2]] : !cir.vptr), !cir.ptr<!u8i>
+// CIR:        %[[CONST_24:.*]] = cir.const #cir.int<-24> : !s64i
+// CIR:        %[[BASE_OFFSET_ADDR:.*]] = cir.ptr_stride(%[[VPTR_ADDR2]] : !cir.ptr<!u8i>, %[[CONST_24]] : !s64i), !cir.ptr<!u8i>
+// CIR:        %[[BASE_OFFSET_PTR:.*]] = cir.cast(bitcast, %[[BASE_OFFSET_ADDR]] : !cir.ptr<!u8i>), !cir.ptr<!s64i>
+// CIR:        %[[BASE_OFFSET:.*]] = cir.load{{.*}} %[[BASE_OFFSET_PTR]] : !cir.ptr<!s64i>, !s64i
+// CIR:        %[[THIS_PTR:.*]] = cir.cast(bitcast, %[[THIS]] : !cir.ptr<!rec_D>), !cir.ptr<!u8i>
+// CIR:        %[[BASE_PTR:.*]] = cir.ptr_stride(%[[THIS_PTR]] : !cir.ptr<!u8i>, %[[BASE_OFFSET]] : !s64i), !cir.ptr<!u8i>
+// CIR:        %[[BASE_CAST:.*]] = cir.cast(bitcast, %[[BASE_PTR]] : !cir.ptr<!u8i>), !cir.ptr<!rec_D>
+// CIR:        %[[BASE_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[BASE_CAST]]
+// CIR:        cir.store{{.*}} %[[D_VPTR]], %[[BASE_VPTR_ADDR]]
+// CIR:        %[[C_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 6 -> !cir.ptr<!cir.ptr<!void>>
+// CIR:        %[[C_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[C_VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr>
+// CIR:        %[[C_VPTR:.*]] = cir.load{{.*}} %[[C_VPTR_ADDR]] : !cir.ptr<!cir.vptr>, !cir.vptr
+// CIR:        %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_D> nonnull [16] -> !cir.ptr<!rec_C>
+// CIR:        %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[C_ADDR]] : !cir.ptr<!rec_C> -> !cir.ptr<!cir.vptr>
+// CIR:        cir.store{{.*}} %[[C_VPTR]], %[[C_VPTR_ADDR]] : !cir.vptr, !cir.ptr<!cir.vptr>
+
+// The C2 constructor for D gets emitted earlier in OGCG, see above.
+
+// Base (C2) constructor for A
+
+// CIR:      cir.func {{.*}} @_ZN1AC2Ev
+// CIR-SAME:                      %[[THIS_ARG:.*]]: !cir.ptr<!rec_A>
+// CIR:        %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init]
+// CIR:        cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
+// CIR:        %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
+// CIR:        %[[VPTR:.*]] = cir.vtable.address_point(@_ZTV1A, address_point = <index = 0, offset = 2>) : !cir.vptr
+// CIR:        %[[VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr<!rec_A> -> !cir.ptr<!cir.vptr>
+// CIR:        cir.store{{.*}} %[[VPTR]], %[[VPTR_ADDR]] : !cir.vptr, !cir.ptr<!cir.vptr>
+
+// LLVM: define {{.*}} void @_ZN1AC2Ev(ptr %[[THIS_ARG:.*]]) {
+// LLVM:   %[[THIS_ADDR:.*]] = alloca ptr, i64 1, align 8
+// LLVM:   store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]], align 8
+// LLVM:   %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8
+// LLVM:   store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1A, i64 16), ptr %[[THIS]]
+
+// The C2 constructor for A gets emitted later in OGCG, see below.
+
+// Complete (C1) constructor for D
+
+// CIR:      cir.func {{.*}} @_ZN1DC1Ev
+// CIR-SAME:                      %[[THIS_ARG:.*]]: !cir.ptr<!rec_D>
+// CIR:        %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init]
+// CIR:        cir.store %[[THIS_ARG]], %[[THIS_ADDR]]
+// CIR:        %[[THIS:.*]] = cir.load %[[THIS_ADDR]]
+// CIR:        %[[A_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_D> nonnull [40] -> !cir.ptr<!rec_A>
+// CIR:        cir.call @_ZN1AC2Ev(%[[A_ADDR]]) nothrow : (!cir.ptr<!rec_A>) -> ()
+// CIR:        %[[B_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_D> nonnull [0] -> !cir.ptr<!rec_B>
+// CIR:        %[[B_VTT:.*]] = cir.vtt.address_point @_ZTT1D, offset = 1 -> !cir.ptr<!cir.ptr<!void>>
+// CIR:        cir.call @_ZN1BC2Ev(%[[B_ADDR]], %[[B_VTT]]) nothrow : (!cir.ptr<!rec_B>, !cir.ptr<!cir.ptr<!void>>) -> ()
+// CIR:        %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_D> nonnull [16] -> !cir.ptr<!rec_C>
+// CIR:        %[[C_VTT:.*]] = cir.vtt.address_point @_ZTT1D, offset = 3 -> !cir.ptr<!cir.ptr<!void>>
+// CIR:        cir.call @_ZN1CC2Ev(%[[C_ADDR]], %[[C_VTT]]) nothrow : (!cir.ptr<!rec_C>, !cir.ptr<!cir.ptr<!void>>) -> ()
+// CIR:        %[[D_VPTR:.*]] = cir.vtable.address_point(@_ZTV1D, address_point = <index = 0, offset = 3>) : !cir.vptr
+// CIR:        %[[VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr<!rec_D> -> !cir.ptr<!cir.vptr>
+// CIR:        cir.store{{.*}} %[[D_VPTR]], %[[VPTR_ADDR]] : !cir.vptr, !cir.ptr<!cir.vptr>
+// CIR:        %[[A_VPTR:.*]] = cir.vtable.address_point(@_ZTV1D, address_point = <index = 2, offset = 3>) : !cir.vptr
+// CIR:        %[[A_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_D> nonnull [40] -> !cir.ptr<!rec_A>
+// CIR:        %[[A_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[A_ADDR]] : !cir.ptr<!rec_A> -> !cir.ptr<!cir.vptr>
+// CIR:        cir.store{{.*}} %[[A_VPTR]], %[[A_VPTR_ADDR]] : !cir.vptr, !cir.ptr<!cir.vptr>
+// CIR:        %[[C_VPTR:.*]] = cir.vtable.address_point(@_ZTV1D, address_point = <index = 1, offset = 3>) : !cir.vptr
+// CIR:        %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_D> nonnull [16] -> !cir.ptr<!rec_C>
+// CIR:        %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[C_ADDR]] : !cir.ptr<!rec_C> -> !cir.ptr<!cir.vptr>
+// CIR:        cir.store{{.*}} %[[C_VPTR]], %[[C_VPTR_ADDR]] : !cir.vptr, !cir.ptr<!cir.vptr>
+
+// LLVM: define {{.*}} void @_ZN1DC1Ev(ptr %[[THIS_ARG:.*]])
+// LLVM:   %[[THIS_ADDR:.*]] = alloca ptr
+// LLVM:   store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// LLVM:   %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// LLVM:   %[[A_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 40
+// LLVM:   call void @_ZN1AC2Ev(ptr %[[A_ADDR]])
+// LLVM:   call void @_ZN1BC2Ev(ptr %[[THIS]], ptr getelementptr inbounds nuw (i8, ptr @_ZTT1D, i64 8))
+// LLVM:   %[[C_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 16
+// LLVM:   call void @_ZN1CC2Ev(ptr %[[C_ADDR]], ptr getelementptr inbounds nuw (i8, ptr @_ZTT1D, i64 24))
+// LLVM:   store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 24), ptr %[[THIS]]
+// LLVM:   %[[A_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 40
+// LLVM:   store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 96), ptr %[[A_ADDR]]
+// LLVM:   %[[C_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 16
+// LLVM:   store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 64), ptr %[[C_ADDR]]
+
+// OGCG: define {{.*}} void @_ZN1DC1Ev(ptr {{.*}} %[[THIS_ARG:.*]])
+// OGCG:   %[[THIS_ADDR:.*]] = alloca ptr
+// OGCG:   store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// OGCG:   %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// OGCG:   %[[A_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 40
+// OGCG:   call void @_ZN1AC2Ev(ptr {{.*}} %[[A_ADDR]])
+// OGCG:   call void @_ZN1BC2Ev(ptr {{.*}} %[[THIS]], ptr {{.*}} getelementptr inbounds ([7 x ptr], ptr @_ZTT1D, i64 0, i64 1))
+// OGCG:   %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16
+// OGCG:   call void @_ZN1CC2Ev(ptr {{.*}} %[[C_ADDR]], ptr {{.*}} getelementptr inbounds ([7 x ptr], ptr @_ZTT1D, i64 0, i64 3))
+// OGCG:   store ptr getelementptr inbounds inrange(-24, 16) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 0, i32 3), ptr %[[THIS]]
+// OGCG:   %[[A_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 40
+// OGCG:   store ptr getelementptr inbounds inrange(-24, 8) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 2, i32 3), ptr %[[A_ADDR]]
+// OGCG:   %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16
+// OGCG:   store ptr getelementptr inbounds inrange(-24, 8) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 1, i32 3), ptr %[[C_ADDR]]
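+
+// In the complete (C1) constructor the vptr stores come straight from
+// @_ZTV1D rather than through the VTT: byte offsets 24, 96 and 64 are the
+// address points of the D, A and C groups respectively (the [0, 3], [2, 3]
+// and [1, 3] index paths above).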
+
+// OGCG: define {{.*}} void @_ZN1AC2Ev(ptr {{.*}} %[[THIS_ARG:.*]])
+// OGCG:   %[[THIS_ADDR:.*]] = alloca ptr
+// OGCG:   store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]]
+// OGCG:   %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]]
+// OGCG:   store ptr getelementptr inbounds inrange(-16, 8) ({ [3 x ptr] }, ptr @_ZTV1A, i32 0, i32 0, i32 2), ptr %[[THIS]]
