https://github.com/andykaylor created https://github.com/llvm/llvm-project/pull/156521
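Some context for the patch below: in the Itanium C++ ABI, base-object (C2) constructors and destructors of classes with virtual bases receive an implicit VTT (virtual table table) parameter, which the complete-object (C1) constructor supplies. A minimal sketch of the kind of C++ that exercises this path, mirroring the A/B/C/D hierarchy in the patch's vtt.cpp test (data members are omitted and the member function names are illustrative; the VTT element indices and the 16-byte offset in the comments are the values that test checks):

class A {
public:
  virtual void v();
};

class B : public virtual A {
public:
  virtual void w();
};

class C : public virtual A {
public:
  virtual void x();
};

class D : public B, public C {
public:
  D();            // The complete constructor _ZN1DC1Ev calls the base
                  // constructors with slices of the VTT _ZTT1D: _ZN1BC2Ev
                  // receives the address of element 1, and _ZN1CC2Ev receives
                  // element 3 plus 'this' adjusted by 16 bytes to the C
                  // subobject.
  virtual void y();
};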
This adds the support for implicit VTT arguments in constructors. >From 256f055c5d6982bab9b51223d00c9910ff7a6e5c Mon Sep 17 00:00:00 2001 From: Andy Kaylor <akay...@nvidia.com> Date: Tue, 19 Aug 2025 13:33:27 -0700 Subject: [PATCH] [CIR] Add support for constructors with VTT parameters This adds the support for implicit VTT arguments in constructors and adds the code needed to produce and use those arguments. --- clang/include/clang/CIR/MissingFeatures.h | 4 - clang/lib/CIR/CodeGen/CIRGenBuilder.h | 33 ++ clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp | 14 + clang/lib/CIR/CodeGen/CIRGenCXXABI.h | 79 +++++ clang/lib/CIR/CodeGen/CIRGenCall.cpp | 20 +- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 150 +++++++- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 20 ++ clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 156 ++++++++- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 15 +- clang/lib/CIR/CodeGen/CIRGenModule.h | 10 + clang/lib/CIR/CodeGen/CIRGenTypes.h | 3 +- clang/lib/CIR/CodeGen/CIRGenVTables.cpp | 46 +++ clang/lib/CIR/CodeGen/CIRGenVTables.h | 21 ++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 3 +- clang/test/CIR/CodeGen/vtt.cpp | 319 ++++++++++++++++++ clang/test/CIR/Lowering/vtt-addrpoint.cir | 2 +- 17 files changed, 857 insertions(+), 40 deletions(-) diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 70e0abe30e416..85c18212f9abd 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -132,10 +132,8 @@ struct MissingFeatures { // RecordType static bool skippedLayout() { return false; } static bool astRecordDeclAttr() { return false; } - static bool cxxSupport() { return false; } static bool recordZeroInit() { return false; } static bool zeroSizeRecordMembers() { return false; } - static bool recordLayoutVirtualBases() { return false; } // Various handling of deferred processing in CIRGenModule. static bool cgmRelease() { return false; } @@ -148,7 +146,6 @@ struct MissingFeatures { static bool cxxabiUseARMMethodPtrABI() { return false; } static bool cxxabiUseARMGuardVarABI() { return false; } static bool cxxabiAppleARM64CXXABI() { return false; } - static bool cxxabiStructorImplicitParam() { return false; } static bool isDiscreteBitFieldABI() { return false; } // Address class @@ -229,7 +226,6 @@ struct MissingFeatures { static bool globalViewIndices() { return false; } static bool globalViewIntLowering() { return false; } static bool hip() { return false; } - static bool implicitConstructorArgs() { return false; } static bool incrementProfileCounter() { return false; } static bool innermostEHScope() { return false; } static bool insertBuiltinUnpredictable() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 41207afd6a43b..5bf29fad2f3dd 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -287,6 +287,10 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { cir::ConstantOp getUInt32(uint32_t c, mlir::Location loc) { return getConstantInt(loc, getUInt32Ty(), c); } + cir::ConstantOp getSInt64(uint64_t c, mlir::Location loc) { + cir::IntType sInt64Ty = getSInt64Ty(); + return create<cir::ConstantOp>(loc, cir::IntAttr::get(sInt64Ty, c)); + } // Creates constant nullptr for pointer type ty. 
cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc) { @@ -359,6 +363,18 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { return Address(baseAddr, destType, addr.getAlignment()); } + mlir::Value createVTTAddrPoint(mlir::Location loc, mlir::Type retTy, + mlir::Value addr, uint64_t offset) { + return cir::VTTAddrPointOp::create(*this, loc, retTy, + mlir::FlatSymbolRefAttr{}, addr, offset); + } + + mlir::Value createVTTAddrPoint(mlir::Location loc, mlir::Type retTy, + mlir::FlatSymbolRefAttr sym, uint64_t offset) { + return cir::VTTAddrPointOp::create(*this, loc, retTy, sym, mlir::Value{}, + offset); + } + /// Cast the element type of the given address to a different type, /// preserving information like the alignment. Address createElementBitCast(mlir::Location loc, Address addr, @@ -379,6 +395,23 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy { /*mem_order=*/cir::MemOrderAttr{}); } + cir::LoadOp createAlignedLoad(mlir::Location loc, mlir::Type ty, + mlir::Value ptr, llvm::MaybeAlign align) { + if (ty != mlir::cast<cir::PointerType>(ptr.getType()).getPointee()) + ptr = createPtrBitcast(ptr, ty); + uint64_t alignment = align ? align->value() : 0; + mlir::IntegerAttr alignAttr = getAlignmentAttr(alignment); + return create<cir::LoadOp>(loc, ptr, /*isDeref=*/false, + /*isVolatile=*/false, alignAttr, + /*mem_order=*/cir::MemOrderAttr{}); + } + + cir::LoadOp + createAlignedLoad(mlir::Location loc, mlir::Type ty, mlir::Value ptr, + clang::CharUnits align = clang::CharUnits::One()) { + return createAlignedLoad(loc, ty, ptr, align.getAsAlign()); + } + cir::StoreOp createStore(mlir::Location loc, mlir::Value val, Address dst, bool isVolatile = false, mlir::IntegerAttr align = {}, diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp index 33b812ac81f6e..5f1faabde22a5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.cpp @@ -22,6 +22,20 @@ using namespace clang::CIRGen; CIRGenCXXABI::~CIRGenCXXABI() {} +CIRGenCXXABI::AddedStructorArgCounts CIRGenCXXABI::addImplicitConstructorArgs( + CIRGenFunction &cgf, const CXXConstructorDecl *d, CXXCtorType type, + bool forVirtualBase, bool delegating, CallArgList &args) { + AddedStructorArgs addedArgs = + getImplicitConstructorArgs(cgf, d, type, forVirtualBase, delegating); + for (auto [idx, prefixArg] : llvm::enumerate(addedArgs.prefix)) + args.insert(args.begin() + 1 + idx, + CallArg(RValue::get(prefixArg.value), prefixArg.type)); + for (const auto &arg : addedArgs.suffix) + args.add(RValue::get(arg.value), arg.type); + return AddedStructorArgCounts(addedArgs.prefix.size(), + addedArgs.suffix.size()); +} + void CIRGenCXXABI::buildThisParam(CIRGenFunction &cgf, FunctionArgList &params) { const auto *md = cast<CXXMethodDecl>(cgf.curGD.getDecl()); diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index 32c4ef28532ea..ae922599809b8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -47,11 +47,67 @@ class CIRGenCXXABI { /// constructor/destructor Decl. virtual void emitCXXStructor(clang::GlobalDecl gd) = 0; + virtual mlir::Value + getVirtualBaseClassOffset(mlir::Location loc, CIRGenFunction &cgf, + Address thisAddr, const CXXRecordDecl *classDecl, + const CXXRecordDecl *baseClassDecl) = 0; + public: + /// Similar to AddedStructorArgs, but only notes the number of additional + /// arguments.
+ struct AddedStructorArgCounts { + unsigned prefix = 0; + unsigned suffix = 0; + AddedStructorArgCounts() = default; + AddedStructorArgCounts(unsigned p, unsigned s) : prefix(p), suffix(s) {} + static AddedStructorArgCounts withPrefix(unsigned n) { return {n, 0}; } + static AddedStructorArgCounts withSuffix(unsigned n) { return {0, n}; } + }; + + /// Additional implicit arguments to add to the beginning (Prefix) and end + /// (Suffix) of a constructor / destructor arg list. + /// + /// Note that Prefix should actually be inserted *after* the first existing + /// arg; `this` arguments always come first. + struct AddedStructorArgs { + struct Arg { + mlir::Value value; + QualType type; + }; + llvm::SmallVector<Arg, 1> prefix; + llvm::SmallVector<Arg, 1> suffix; + AddedStructorArgs() = default; + AddedStructorArgs(llvm::SmallVector<Arg, 1> p, llvm::SmallVector<Arg, 1> s) + : prefix(std::move(p)), suffix(std::move(s)) {} + static AddedStructorArgs withPrefix(llvm::SmallVector<Arg, 1> args) { + return {std::move(args), {}}; + } + static AddedStructorArgs withSuffix(llvm::SmallVector<Arg, 1> args) { + return {{}, std::move(args)}; + } + }; + + /// Build the signature of the given constructor or destructor variant by + /// adding any required parameters. For convenience, ArgTys has been + /// initialized with the type of 'this'. + virtual AddedStructorArgCounts + buildStructorSignature(GlobalDecl gd, + llvm::SmallVectorImpl<CanQualType> &argTys) = 0; + + AddedStructorArgCounts + addImplicitConstructorArgs(CIRGenFunction &cgf, const CXXConstructorDecl *d, + CXXCtorType type, bool forVirtualBase, + bool delegating, CallArgList &args); + clang::ImplicitParamDecl *getThisDecl(CIRGenFunction &cgf) { return cgf.cxxabiThisDecl; } + virtual AddedStructorArgs + getImplicitConstructorArgs(CIRGenFunction &cgf, const CXXConstructorDecl *d, + CXXCtorType type, bool forVirtualBase, + bool delegating) = 0; + /// Emit the ABI-specific prolog for the function virtual void emitInstanceFunctionProlog(SourceLocation loc, CIRGenFunction &cgf) = 0; @@ -144,6 +200,17 @@ class CIRGenCXXABI { CIRGenFunction &cgf, const CXXRecordDecl *vtableClass, BaseSubobject base, const CXXRecordDecl *nearestVBase) = 0; + /// Insert any ABI-specific implicit parameters into the parameter list for a + /// function. This generally involves extra data for constructors and + /// destructors. + /// + /// ABIs may also choose to override the return type, which has been + /// initialized with the type of 'this' if HasThisReturn(CGF.CurGD) is true or + /// the formal return type of the function otherwise. + virtual void addImplicitStructorParams(CIRGenFunction &cgf, + clang::QualType &resTy, + FunctionArgList &params) = 0; + /// Checks if ABI requires to initialize vptrs for given dynamic class. virtual bool doStructorsInitializeVPtrs(const clang::CXXRecordDecl *vtableClass) = 0; @@ -162,6 +229,18 @@ class CIRGenCXXABI { /// Gets the mangle context.
clang::MangleContext &getMangleContext() { return *mangleContext; } + + clang::ImplicitParamDecl *&getStructorImplicitParamDecl(CIRGenFunction &cgf) { + return cgf.cxxStructorImplicitParamDecl; + } + + mlir::Value getStructorImplicitParamValue(CIRGenFunction &cgf) { + return cgf.cxxStructorImplicitParamValue; + } + + void setStructorImplicitParamValue(CIRGenFunction &cgf, mlir::Value val) { + cgf.cxxStructorImplicitParamValue = val; + } }; /// Creates and Itanium-family ABI diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp index 25859885296fa..2970b369a85d0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp @@ -197,7 +197,10 @@ CIRGenTypes::arrangeCXXStructorDeclaration(GlobalDecl gd) { if (passParams) appendParameterTypes(*this, argTypes, fpt); - assert(!cir::MissingFeatures::implicitConstructorArgs()); + // The structor signature may include implicit parameters. + [[maybe_unused]] CIRGenCXXABI::AddedStructorArgCounts addedArgs = + theCXXABI.buildStructorSignature(gd, argTypes); + assert(!cir::MissingFeatures::opCallCIRGenFuncInfoExtParamInfo()); RequiredArgs required = (passParams && md->isVariadic() ? RequiredArgs(argTypes.size()) @@ -324,26 +327,27 @@ arrangeFreeFunctionLikeCall(CIRGenTypes &cgt, CIRGenModule &cgm, /// Arrange a call to a C++ method, passing the given arguments. /// +/// extraPrefixArgs is the number of ABI-specific args passed after the `this` +/// parameter. /// passProtoArgs indicates whether `args` has args for the parameters in the /// given CXXConstructorDecl. const CIRGenFunctionInfo &CIRGenTypes::arrangeCXXConstructorCall( const CallArgList &args, const CXXConstructorDecl *d, CXXCtorType ctorKind, - bool passProtoArgs) { + unsigned extraPrefixArgs, unsigned extraSuffixArgs, bool passProtoArgs) { // FIXME: Kill copy. llvm::SmallVector<CanQualType, 16> argTypes; for (const auto &arg : args) argTypes.push_back(astContext.getCanonicalParamType(arg.ty)); - assert(!cir::MissingFeatures::implicitConstructorArgs()); // +1 for implicit this, which should always be args[0] - unsigned totalPrefixArgs = 1; + unsigned totalPrefixArgs = 1 + extraPrefixArgs; CanQual<FunctionProtoType> fpt = getFormalType(d); - RequiredArgs required = - passProtoArgs - ? RequiredArgs::getFromProtoWithExtraSlots(fpt, totalPrefixArgs) - : RequiredArgs::All; + RequiredArgs required = passProtoArgs + ? RequiredArgs::getFromProtoWithExtraSlots( + fpt, totalPrefixArgs + extraSuffixArgs) + : RequiredArgs::All; GlobalDecl gd(d, ctorKind); if (theCXXABI.hasThisReturn(gd)) diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 9a27932c12dff..5eff039928adc 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -316,6 +316,7 @@ static Address applyNonVirtualAndVirtualOffset( assert(!nonVirtualOffset.isZero() || virtualOffset != nullptr); // Compute the offset from the static and dynamic components. + mlir::Value baseOffset; if (!nonVirtualOffset.isZero()) { if (virtualOffset) { cgf.cgm.errorNYI( @@ -329,10 +330,35 @@ static Address applyNonVirtualAndVirtualOffset( loc, addr, baseValueTy, nonVirtualOffset.getQuantity(), assumeNotNull); } + } else { + baseOffset = virtualOffset; + } + + // Apply the base offset. cir.ptr_stride adjusts by a number of elements, + // not bytes. So the pointer must be cast to a byte pointer and back. 
+ + mlir::Value ptr = addr.getPointer(); + mlir::Type charPtrType = cgf.cgm.UInt8PtrTy; + mlir::Value charPtr = + cgf.getBuilder().createCast(cir::CastKind::bitcast, ptr, charPtrType); + mlir::Value adjusted = cir::PtrStrideOp::create( + cgf.getBuilder(), loc, charPtrType, charPtr, baseOffset); + ptr = cgf.getBuilder().createCast(cir::CastKind::bitcast, adjusted, + ptr.getType()); + + // If we have a virtual component, the alignment of the result will + // be relative only to the known alignment of that vbase. + CharUnits alignment; + if (virtualOffset) { + assert(nearestVBase && "virtual offset without vbase?"); + alignment = cgf.cgm.getVBaseAlignment(addr.getAlignment(), derivedClass, + nearestVBase); + } else { + alignment = addr.getAlignment(); } + alignment = alignment.alignmentAtOffset(nonVirtualOffset); - cgf.cgm.errorNYI(loc, "applyNonVirtualAndVirtualOffset: virtual offset"); - return Address::invalid(); + return Address(ptr, alignment); } void CIRGenFunction::initializeVTablePointer(mlir::Location loc, @@ -351,7 +377,11 @@ void CIRGenFunction::initializeVTablePointer(mlir::Location loc, mlir::Type baseValueTy; if (cgm.getCXXABI().isVirtualOffsetNeededForVTableField(*this, vptr)) { - cgm.errorNYI(loc, "initializeVTablePointer: virtual offset for vtable"); + // We need to use the virtual base offset offset because the virtual base + // might have a different offset in the most derived class. + virtualOffset = cgm.getCXXABI().getVirtualBaseClassOffset( + loc, *this, loadCXXThisAddress(), vptr.vtableClass, vptr.nearestVBase); + nonVirtualOffset = vptr.offsetFromNearestVBase; } else { // We can just use the base offset in the complete class. nonVirtualOffset = vptr.base.getBaseOffset(); @@ -447,14 +477,14 @@ void CIRGenFunction::getVTablePointers(BaseSubobject base, const ASTRecordLayout &layout = getContext().getASTRecordLayout(vtableClass); - nextBaseDecl = nearestVBase; + nextBaseDecl = baseDecl; baseOffset = layout.getVBaseClassOffset(baseDecl); baseOffsetFromNearestVBase = CharUnits::Zero(); baseDeclIsNonVirtualPrimaryBase = false; } else { const ASTRecordLayout &layout = getContext().getASTRecordLayout(rd); - nextBaseDecl = baseDecl; + nextBaseDecl = nearestVBase; baseOffset = base.getBaseOffset() + layout.getBaseClassOffset(baseDecl); baseOffsetFromNearestVBase = offsetFromNearestVBase + layout.getBaseClassOffset(baseDecl); @@ -511,6 +541,64 @@ void CIRGenFunction::emitInitializerForField(FieldDecl *field, LValue lhs, assert(!cir::MissingFeatures::requiresCleanups()); } +CharUnits +CIRGenModule::getDynamicOffsetAlignment(CharUnits actualBaseAlign, + const CXXRecordDecl *baseDecl, + CharUnits expectedTargetAlign) { + // If the base is an incomplete type (which is, alas, possible with + // member pointers), be pessimistic. + if (!baseDecl->isCompleteDefinition()) + return std::min(actualBaseAlign, expectedTargetAlign); + + const ASTRecordLayout &baseLayout = + getASTContext().getASTRecordLayout(baseDecl); + CharUnits expectedBaseAlign = baseLayout.getNonVirtualAlignment(); + + // If the class is properly aligned, assume the target offset is, too. + // + // This actually isn't necessarily the right thing to do --- if the + // class is a complete object, but it's only properly aligned for a + // base subobject, then the alignments of things relative to it are + // probably off as well. (Note that this requires the alignment of + // the target to be greater than the NV alignment of the derived + // class.) 
+ // + // However, our approach to this kind of under-alignment can only + // ever be best effort; after all, we're never going to propagate + // alignments through variables or parameters. Note, in particular, + // that constructing a polymorphic type in an address that's less + // than pointer-aligned will generally trap in the constructor, + // unless we someday add some sort of attribute to change the + // assumed alignment of 'this'. So our goal here is pretty much + // just to allow the user to explicitly say that a pointer is + // under-aligned and then safely access its fields and vtables. + if (actualBaseAlign >= expectedBaseAlign) + return expectedTargetAlign; + + // Otherwise, we might be offset by an arbitrary multiple of the + // actual alignment. The correct adjustment is to take the min of + // the two alignments. + return std::min(actualBaseAlign, expectedTargetAlign); +} + +/// Return the best known alignment for a pointer to a virtual base, +/// given the alignment of a pointer to the derived class. +clang::CharUnits +CIRGenModule::getVBaseAlignment(CharUnits actualDerivedAlign, + const CXXRecordDecl *derivedClass, + const CXXRecordDecl *vbaseClass) { + // The basic idea here is that an underaligned derived pointer might + // indicate an underaligned base pointer. + + assert(vbaseClass->isCompleteDefinition()); + const ASTRecordLayout &baseLayout = + getASTContext().getASTRecordLayout(vbaseClass); + CharUnits expectedVBaseAlign = baseLayout.getNonVirtualAlignment(); + + return getDynamicOffsetAlignment(actualDerivedAlign, derivedClass, + expectedVBaseAlign); +} + /// Emit a loop to call a particular constructor for each of several members /// of an array. /// @@ -723,6 +811,52 @@ void CIRGenFunction::emitCXXDestructorCall(const CXXDestructorDecl *dd, delegating, thisAddr, thisTy); } +mlir::Value CIRGenFunction::getVTTParameter(GlobalDecl gd, bool forVirtualBase, + bool delegating) { + if (!cgm.getCXXABI().needsVTTParameter(gd)) + return nullptr; + + const CXXRecordDecl *rd = cast<CXXMethodDecl>(curFuncDecl)->getParent(); + const CXXRecordDecl *base = cast<CXXMethodDecl>(gd.getDecl())->getParent(); + + uint64_t subVTTIndex; + + if (delegating) { + cgm.errorNYI(rd->getSourceRange(), + "getVTTParameter: delegating constructor"); + return {}; + } else if (rd == base) { + // If the record matches the base, this is the complete ctor/dtor + // variant calling the base variant in a class with virtual bases. + assert(!cgm.getCXXABI().needsVTTParameter(curGD) && + "doing no-op VTT offset in base dtor/ctor?"); + assert(!forVirtualBase && "Can't have same class as virtual base!"); + subVTTIndex = 0; + } else { + const ASTRecordLayout &layout = getContext().getASTRecordLayout(rd); + CharUnits baseOffset = forVirtualBase ? layout.getVBaseClassOffset(base) + : layout.getBaseClassOffset(base); + + subVTTIndex = + cgm.getVTables().getSubVTTIndex(rd, BaseSubobject(base, baseOffset)); + assert(subVTTIndex != 0 && "Sub-VTT index must be greater than zero!"); + } + + mlir::Location loc = cgm.getLoc(rd->getBeginLoc()); + if (cgm.getCXXABI().needsVTTParameter(curGD)) { + // A VTT parameter was passed to the constructor, use it. + mlir::Value vtt = loadCXXVTT(); + return cgm.getBuilder().createVTTAddrPoint(loc, vtt.getType(), vtt, + subVTTIndex); + } else { + // We're the complete constructor, so get the VTT by name. 
+ cir::GlobalOp vtt = cgm.getVTables().getAddrOfVTT(rd); + return cgm.getBuilder().createVTTAddrPoint( + loc, cgm.getBuilder().getPointerTo(cgm.VoidPtrTy), + mlir::FlatSymbolRefAttr::get(vtt.getSymNameAttr()), subVTTIndex); + } +} + Address CIRGenFunction::getAddressOfBaseClass( Address value, const CXXRecordDecl *derived, llvm::iterator_range<CastExpr::path_const_iterator> path, @@ -856,12 +990,14 @@ void CIRGenFunction::emitCXXConstructorCall( } // Insert any ABI-specific implicit constructor arguments. - assert(!cir::MissingFeatures::implicitConstructorArgs()); + CIRGenCXXABI::AddedStructorArgCounts extraArgs = + cgm.getCXXABI().addImplicitConstructorArgs(*this, d, type, forVirtualBase, + delegating, args); // Emit the call. auto calleePtr = cgm.getAddrOfCXXStructor(GlobalDecl(d, type)); const CIRGenFunctionInfo &info = cgm.getTypes().arrangeCXXConstructorCall( - args, d, type, passPrototypeArgs); + args, d, type, extraArgs.prefix, extraArgs.suffix, passPrototypeArgs); CIRGenCallee callee = CIRGenCallee::forDirect(calleePtr, GlobalDecl(d, type)); cir::CIRCallOpInterface c; emitCall(info, callee, ReturnValueSlot(), args, &c, getLoc(loc)); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index deabb94b7d129..fb782a097a2e0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -748,7 +748,7 @@ clang::QualType CIRGenFunction::buildFunctionArgList(clang::GlobalDecl gd, args.push_back(param); if (md && (isa<CXXConstructorDecl>(md) || isa<CXXDestructorDecl>(md))) - assert(!cir::MissingFeatures::cxxabiStructorImplicitParam()); + cgm.getCXXABI().addImplicitStructorParams(*this, retTy, args); return retTy; } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index a547cd36cb35f..470904d1eb171 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -74,6 +74,11 @@ class CIRGenFunction : public CIRGenTypeCache { mlir::Value cxxThisValue = nullptr; clang::CharUnits cxxThisAlignment; + /// When generating code for a constructor or destructor, this will hold the + /// implicit argument (e.g. VTT). + ImplicitParamDecl *cxxStructorImplicitParamDecl{}; + mlir::Value cxxStructorImplicitParamValue{}; + /// The value of 'this' to sue when evaluating CXXDefaultInitExprs within this /// expression. Address cxxDefaultInitExprThis = Address::invalid(); @@ -633,6 +638,13 @@ class CIRGenFunction : public CIRGenTypeCache { llvm::iterator_range<CastExpr::path_const_iterator> path, bool nullCheckValue, SourceLocation loc); + /// GetVTTParameter - Return the VTT parameter that should be passed to a + /// base constructor/destructor with virtual bases. + /// FIXME: VTTs are Itanium ABI-specific, so the definition should move + /// to ItaniumCXXABI.cpp together with all the references to VTT. + mlir::Value getVTTParameter(GlobalDecl gd, bool forVirtualBase, + bool delegating); + LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source = AlignmentSource::Type) { return makeAddrLValue(addr, ty, LValueBaseInfo(source)); @@ -666,6 +678,14 @@ class CIRGenFunction : public CIRGenTypeCache { } Address loadCXXThisAddress(); + /// Load the VTT parameter to base constructors/destructors have virtual + /// bases. FIXME: Every place that calls LoadCXXVTT is something that needs to + /// be abstracted properly. 
+ mlir::Value loadCXXVTT() { + assert(cxxStructorImplicitParamValue && "no VTT value for this function"); + return cxxStructorImplicitParamValue; + } + /// Convert the given pointer to a complete class to the given direct base. Address getAddressOfDirectBaseInCompleteClass(mlir::Location loc, Address value, diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 7e57ede3193b4..bc336c2aba453 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -42,11 +42,24 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI { assert(!cir::MissingFeatures::cxxabiUseARMGuardVarABI()); } + AddedStructorArgs getImplicitConstructorArgs(CIRGenFunction &cgf, + const CXXConstructorDecl *d, + CXXCtorType type, + bool forVirtualBase, + bool delegating) override; + bool needsVTTParameter(clang::GlobalDecl gd) override; + AddedStructorArgCounts + buildStructorSignature(GlobalDecl gd, + llvm::SmallVectorImpl<CanQualType> &argTys) override; + void emitInstanceFunctionProlog(SourceLocation loc, CIRGenFunction &cgf) override; + void addImplicitStructorParams(CIRGenFunction &cgf, QualType &resTy, + FunctionArgList &params) override; + void emitCXXConstructors(const clang::CXXConstructorDecl *d) override; void emitCXXDestructors(const clang::CXXDestructorDecl *d) override; void emitCXXStructor(clang::GlobalDecl gd) override; @@ -78,6 +91,9 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI { mlir::Value getVTableAddressPoint(BaseSubobject base, const CXXRecordDecl *vtableClass) override; + mlir::Value getVTableAddressPointInStructorWithVTT( + CIRGenFunction &cgf, const CXXRecordDecl *vtableClass, BaseSubobject base, + const CXXRecordDecl *nearestVBase); mlir::Value getVTableAddressPointInStructor( CIRGenFunction &cgf, const clang::CXXRecordDecl *vtableClass, @@ -90,6 +106,11 @@ class CIRGenItaniumCXXABI : public CIRGenCXXABI { bool doStructorsInitializeVPtrs(const CXXRecordDecl *vtableClass) override { return true; } + + mlir::Value + getVirtualBaseClassOffset(mlir::Location loc, CIRGenFunction &cgf, + Address thisAddr, const CXXRecordDecl *classDecl, + const CXXRecordDecl *baseClassDecl) override; }; } // namespace @@ -106,10 +127,13 @@ void CIRGenItaniumCXXABI::emitInstanceFunctionProlog(SourceLocation loc, /// adjustments are required, because they are all handled by thunks. setCXXABIThisValue(cgf, loadIncomingCXXThis(cgf)); - /// Classic codegen has code here to initialize the 'vtt' slot if - // getStructorImplicitParamDecl(cgf) returns a non-null value, but in the - // current implementation (of classic codegen) it never does. - assert(!cir::MissingFeatures::cxxabiStructorImplicitParam()); + /// Initialize the 'vtt' slot if needed. + if (getStructorImplicitParamDecl(cgf)) { + cir::LoadOp val = cgf.getBuilder().createLoad( + cgf.getLoc(loc), + cgf.getAddrOfLocalVar(getStructorImplicitParamDecl(cgf))); + setStructorImplicitParamValue(cgf, val); + } /// If this is a function that the ABI specifies returns 'this', initialize /// the return slot to this' at the start of the function. @@ -125,6 +149,28 @@ void CIRGenItaniumCXXABI::emitInstanceFunctionProlog(SourceLocation loc, } } +CIRGenCXXABI::AddedStructorArgCounts +CIRGenItaniumCXXABI::buildStructorSignature( + GlobalDecl gd, llvm::SmallVectorImpl<CanQualType> &argTys) { + clang::ASTContext &astContext = cgm.getASTContext(); + + // All parameters are already in place except VTT, which goes after 'this'.
+ // These are clang types, so we don't need to worry about sret yet. + + // Check if we need to add a VTT parameter (which has type void **). + if ((isa<CXXConstructorDecl>(gd.getDecl()) ? gd.getCtorType() == Ctor_Base + : gd.getDtorType() == Dtor_Base) && + cast<CXXMethodDecl>(gd.getDecl())->getParent()->getNumVBases() != 0) { + assert(!cir::MissingFeatures::addressSpace()); + argTys.insert(argTys.begin() + 1, + astContext.getPointerType( + CanQualType::CreateUnsafe(astContext.VoidPtrTy))); + return AddedStructorArgCounts::withPrefix(1); + } + + return AddedStructorArgCounts{}; +} + // Find out how to cirgen the complete destructor and constructor namespace { enum class StructorCIRGen { Emit, RAUW, Alias, COMDAT }; @@ -137,11 +183,8 @@ static StructorCIRGen getCIRGenToUse(CIRGenModule &cgm, // The complete and base structors are not equivalent if there are any virtual // bases, so emit separate functions. - if (md->getParent()->getNumVBases()) { - // The return value is correct here, but other support for this is NYI. - cgm.errorNYI(md->getSourceRange(), "getCIRGenToUse: virtual bases"); + if (md->getParent()->getNumVBases()) return StructorCIRGen::Emit; - } GlobalDecl aliasDecl; if (const auto *dd = dyn_cast<CXXDestructorDecl>(md)) { @@ -222,6 +265,27 @@ void CIRGenItaniumCXXABI::emitCXXStructor(GlobalDecl gd) { cgm.maybeSetTrivialComdat(*md, fn); } +void CIRGenItaniumCXXABI::addImplicitStructorParams(CIRGenFunction &cgf, + QualType &resTy, + FunctionArgList &params) { + const auto *md = cast<CXXMethodDecl>(cgf.curGD.getDecl()); + assert(isa<CXXConstructorDecl>(md) || isa<CXXDestructorDecl>(md)); + + // Check if we need a VTT parameter as well. + if (needsVTTParameter(cgf.curGD)) { + ASTContext &astContext = cgm.getASTContext(); + + // FIXME: avoid the fake decl + assert(!cir::MissingFeatures::addressSpace()); + QualType t = astContext.getPointerType(astContext.VoidPtrTy); + auto *vttDecl = ImplicitParamDecl::Create( + astContext, /*DC=*/nullptr, md->getLocation(), + &astContext.Idents.get("vtt"), t, ImplicitParamKind::CXXVTT); + params.insert(params.begin() + 1, vttDecl); + getStructorImplicitParamDecl(cgf) = vttDecl; + } +} + void CIRGenItaniumCXXABI::emitCXXConstructors(const CXXConstructorDecl *d) { // Just make sure we're in sync with TargetCXXABI. assert(cgm.getTarget().getCXXABI().hasConstructorVariants()); @@ -254,6 +318,23 @@ void CIRGenItaniumCXXABI::emitCXXDestructors(const CXXDestructorDecl *d) { cgm.emitGlobal(GlobalDecl(d, Dtor_Deleting)); } +CIRGenCXXABI::AddedStructorArgs CIRGenItaniumCXXABI::getImplicitConstructorArgs( + CIRGenFunction &cgf, const CXXConstructorDecl *d, CXXCtorType type, + bool forVirtualBase, bool delegating) { + if (!needsVTTParameter(GlobalDecl(d, type))) + return AddedStructorArgs{}; + + // Insert the implicit 'vtt' argument as the second argument. Make sure to + // correctly reflect its address space, which can differ from generic on + // some targets. + mlir::Value vtt = + cgf.getVTTParameter(GlobalDecl(d, type), forVirtualBase, delegating); + QualType vttTy = + cgm.getASTContext().getPointerType(cgm.getASTContext().VoidPtrTy); + assert(!cir::MissingFeatures::addressSpace()); + return AddedStructorArgs::withPrefix({{vtt, vttTy}}); +} + /// Return whether the given global decl needs a VTT (virtual table table) /// parameter, which it does if it's a base constructor or destructor with /// virtual bases.
@@ -487,8 +568,8 @@ CIRGenCallee CIRGenItaniumCXXABI::getVirtualFunctionPointer( } else { auto vtableSlotPtr = cir::VTableGetVirtualFnAddrOp::create( builder, loc, builder.getPointerTo(tyPtr), vtable, vtableIndex); - vfuncLoad = builder.createAlignedLoad( - loc, vtableSlotPtr, cgf.getPointerAlign().getQuantity()); + vfuncLoad = builder.createAlignedLoad(loc, tyPtr, vtableSlotPtr, + cgf.getPointerAlign()); } // Add !invariant.load md to virtual function load to indicate that @@ -508,6 +589,28 @@ CIRGenCallee CIRGenItaniumCXXABI::getVirtualFunctionPointer( return callee; } +mlir::Value CIRGenItaniumCXXABI::getVTableAddressPointInStructorWithVTT( + CIRGenFunction &cgf, const CXXRecordDecl *vtableClass, BaseSubobject base, + const CXXRecordDecl *nearestVBase) { + assert((base.getBase()->getNumVBases() || nearestVBase != nullptr) && + needsVTTParameter(cgf.curGD) && "This class doesn't have VTT"); + + // Get the secondary vpointer index. + uint64_t virtualPointerIndex = + cgm.getVTables().getSecondaryVirtualPointerIndex(vtableClass, base); + + /// Load the VTT. + mlir::Value vttPtr = cgf.loadCXXVTT(); + mlir::Location loc = cgf.getLoc(vtableClass->getSourceRange()); + // Calculate the address point from the VTT, and the offset may be zero. + vttPtr = cgf.getBuilder().createVTTAddrPoint(loc, vttPtr.getType(), vttPtr, + virtualPointerIndex); + // And load the address point from the VTT. + auto vptrType = cir::VPtrType::get(cgf.getBuilder().getContext()); + return cgf.getBuilder().createAlignedLoad(loc, vptrType, vttPtr, + cgf.getPointerAlign()); +} + mlir::Value CIRGenItaniumCXXABI::getVTableAddressPoint(BaseSubobject base, const CXXRecordDecl *vtableClass) { @@ -537,8 +640,8 @@ mlir::Value CIRGenItaniumCXXABI::getVTableAddressPointInStructor( if ((base.getBase()->getNumVBases() || nearestVBase != nullptr) && needsVTTParameter(cgf.curGD)) { - cgm.errorNYI(cgf.curFuncDecl->getLocation(), - "getVTableAddressPointInStructorWithVTT"); + return getVTableAddressPointInStructorWithVTT(cgf, vtableClass, base, + nearestVBase); } return getVTableAddressPoint(base, vtableClass); } @@ -549,3 +652,32 @@ bool CIRGenItaniumCXXABI::isVirtualOffsetNeededForVTableField( return false; return needsVTTParameter(cgf.curGD); } + +mlir::Value CIRGenItaniumCXXABI::getVirtualBaseClassOffset( + mlir::Location loc, CIRGenFunction &cgf, Address thisAddr, + const CXXRecordDecl *classDecl, const CXXRecordDecl *baseClassDecl) { + mlir::Value vtablePtr = cgf.getVTablePtr(loc, thisAddr, classDecl); + mlir::Value vtableBytePtr = + cgf.getBuilder().createBitcast(vtablePtr, cgm.UInt8PtrTy); + CharUnits vbaseOffsetOffset = + cgm.getItaniumVTableContext().getVirtualBaseOffsetOffset(classDecl, + baseClassDecl); + mlir::Value offsetVal = + cgf.getBuilder().getSInt64(vbaseOffsetOffset.getQuantity(), loc); + auto vbaseOffsetPtr = + cir::PtrStrideOp::create(cgf.getBuilder(), loc, cgm.UInt8PtrTy, + vtableBytePtr, offsetVal); // vbase.offset.ptr + + mlir::Value vbaseOffset; + if (cgm.getItaniumVTableContext().isRelativeLayout()) { + assert(!cir::MissingFeatures::vtableRelativeLayout()); + cgm.errorNYI(loc, "getVirtualBaseClassOffset: relative layout"); + } else { + mlir::Value offsetPtr = cgf.getBuilder().createBitcast( + vbaseOffsetPtr, cgf.getBuilder().getPointerTo(cgm.PtrDiffTy)); + vbaseOffset = cgf.getBuilder().createLoad( + loc, Address(offsetPtr, cgm.PtrDiffTy, + cgf.getPointerAlign())); // vbase.offset + } + return vbaseOffset; +} diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp index 
c7f548498c5cb..0b3453b0e1cb5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp @@ -1001,10 +1001,17 @@ cir::GlobalOp CIRGenModule::createOrReplaceCXXRuntimeVariable( mlir::SymbolTable::lookupSymbolIn(theModule, name)); if (gv) { - // There should be handling added here to check the type as assert that - // gv was a declaration if the type doesn't match and handling below - // to replace the variable if it was a declaration. - errorNYI(loc, "createOrReplaceCXXRuntimeVariable: already exists"); + // Check if the variable has the right type. + if (gv.getSymType() == ty) + return gv; + + // Because of C++ name mangling, the only way we can end up with an already + // existing global with the same name is if it has been declared extern + // "C". + assert(gv.isDeclaration() && "Declaration has wrong type!"); + + errorNYI(loc, "createOrReplaceCXXRuntimeVariable: declaration exists with " + "wrong type"); return gv; } diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h index 4f5c7f898af8c..987275b4eac2d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenModule.h +++ b/clang/lib/CIR/CodeGen/CIRGenModule.h @@ -236,6 +236,16 @@ class CIRGenModule : public CIRGenTypeCache { clang::CharUnits getNaturalTypeAlignment(clang::QualType t, LValueBaseInfo *baseInfo); + /// TODO: Add TBAAAccessInfo + CharUnits getDynamicOffsetAlignment(CharUnits actualBaseAlign, + const CXXRecordDecl *baseDecl, + CharUnits expectedTargetAlign); + + /// Returns the assumed alignment of a virtual base of a class. + CharUnits getVBaseAlignment(CharUnits derivedAlign, + const CXXRecordDecl *derived, + const CXXRecordDecl *vbase); + cir::FuncOp getAddrOfCXXStructor(clang::GlobalDecl gd, const CIRGenFunctionInfo *fnInfo = nullptr, diff --git a/clang/lib/CIR/CodeGen/CIRGenTypes.h b/clang/lib/CIR/CodeGen/CIRGenTypes.h index 7af0d956e7d56..e79cdfc9f8224 100644 --- a/clang/lib/CIR/CodeGen/CIRGenTypes.h +++ b/clang/lib/CIR/CodeGen/CIRGenTypes.h @@ -175,7 +175,8 @@ class CIRGenTypes { const CIRGenFunctionInfo &arrangeCXXConstructorCall( const CallArgList &args, const clang::CXXConstructorDecl *d, - clang::CXXCtorType ctorKind, bool passProtoArgs = true); + clang::CXXCtorType ctorKind, unsigned extraPrefixArgs, + unsigned extraSuffixArgs, bool passProtoArgs = true); const CIRGenFunctionInfo & arrangeCXXMethodCall(const CallArgList &args, diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp index 9fbc0f67b4b92..af8f5ae2cc0a5 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.cpp @@ -392,6 +392,52 @@ void CIRGenVTables::emitVTTDefinition(cir::GlobalOp vttOp, vttOp.setComdat(true); } +uint64_t CIRGenVTables::getSubVTTIndex(const CXXRecordDecl *rd, + BaseSubobject base) { + BaseSubobjectPairTy classSubobjectPair(rd, base); + + SubVTTIndiciesMapTy::iterator it = subVTTIndicies.find(classSubobjectPair); + if (it != subVTTIndicies.end()) + return it->second; + + VTTBuilder builder(cgm.getASTContext(), rd, /*GenerateDefinition=*/false); + + for (const auto &entry : builder.getSubVTTIndices()) { + // Insert all indices. 
+ BaseSubobjectPairTy subclassSubobjectPair(rd, entry.first); + + subVTTIndicies.insert(std::make_pair(subclassSubobjectPair, entry.second)); + } + + it = subVTTIndicies.find(classSubobjectPair); + assert(it != subVTTIndicies.end() && "Did not find index!"); + + return it->second; +} + +uint64_t CIRGenVTables::getSecondaryVirtualPointerIndex(const CXXRecordDecl *rd, + BaseSubobject base) { + auto it = secondaryVirtualPointerIndices.find(std::make_pair(rd, base)); + + if (it != secondaryVirtualPointerIndices.end()) + return it->second; + + VTTBuilder builder(cgm.getASTContext(), rd, /*GenerateDefinition=*/false); + + // Insert all secondary vpointer indices. + for (const auto &entry : builder.getSecondaryVirtualPointerIndices()) { + std::pair<const CXXRecordDecl *, BaseSubobject> pair = + std::make_pair(rd, entry.first); + + secondaryVirtualPointerIndices.insert(std::make_pair(pair, entry.second)); + } + + it = secondaryVirtualPointerIndices.find(std::make_pair(rd, base)); + assert(it != secondaryVirtualPointerIndices.end() && "Did not find index!"); + + return it->second; +} + void CIRGenVTables::emitThunks(GlobalDecl gd) { const CXXMethodDecl *md = cast<CXXMethodDecl>(gd.getDecl())->getCanonicalDecl(); diff --git a/clang/lib/CIR/CodeGen/CIRGenVTables.h b/clang/lib/CIR/CodeGen/CIRGenVTables.h index 8d352c9949109..e19242c651034 100644 --- a/clang/lib/CIR/CodeGen/CIRGenVTables.h +++ b/clang/lib/CIR/CodeGen/CIRGenVTables.h @@ -32,6 +32,19 @@ class CIRGenVTables { /// Address points for a single vtable. using VTableAddressPointsMapTy = clang::VTableLayout::AddressPointsMapTy; + using BaseSubobjectPairTy = + std::pair<const clang::CXXRecordDecl *, clang::BaseSubobject>; + using SubVTTIndiciesMapTy = llvm::DenseMap<BaseSubobjectPairTy, uint64_t>; + + /// Contains indices into the various sub-VTTs. + SubVTTIndiciesMapTy subVTTIndicies; + + using SecondaryVirtualPointerIndicesMapTy = + llvm::DenseMap<BaseSubobjectPairTy, uint64_t>; + + /// Contains the secondary virtual pointer + /// indices. + SecondaryVirtualPointerIndicesMapTy secondaryVirtualPointerIndices; mlir::Attribute getVTableComponent(const VTableLayout &layout, unsigned componentIndex, @@ -70,6 +83,14 @@ class CIRGenVTables { /// Emit the definition of the given vtable. void emitVTTDefinition(cir::GlobalOp vttOp, cir::GlobalLinkageKind linkage, const CXXRecordDecl *rd); + /// Return the index of the sub-VTT for the base class of the given record + /// decl. + uint64_t getSubVTTIndex(const CXXRecordDecl *rd, BaseSubobject base); + + /// Return the index in the VTT where the virtual pointer for the given + /// subobject is located. + uint64_t getSecondaryVirtualPointerIndex(const CXXRecordDecl *rd, + BaseSubobject base); /// Emit the associated thunks for the given global decl. 
void emitThunks(GlobalDecl gd); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 8cbeb33a8642d..b01a4054d93bd 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -2654,8 +2654,7 @@ mlir::LogicalResult CIRToLLVMVTTAddrPointOpLowering::matchAndRewrite( } offsets.push_back(adaptor.getOffset()); - eltType = mlir::IntegerType::get(resultType.getContext(), 8, - mlir::IntegerType::Signless); + eltType = mlir::LLVM::LLVMPointerType::get(rewriter.getContext()); } else { llvmAddr = getValueForVTableSymbol(op, rewriter, getTypeConverter(), op.getNameAttr(), eltType); diff --git a/clang/test/CIR/CodeGen/vtt.cpp b/clang/test/CIR/CodeGen/vtt.cpp index fcbfc07324c6d..9d88acef91eef 100644 --- a/clang/test/CIR/CodeGen/vtt.cpp +++ b/clang/test/CIR/CodeGen/vtt.cpp @@ -29,6 +29,7 @@ class C : public virtual A { class D : public B, public C { public: long d; + D(); virtual void y(); }; @@ -166,3 +167,321 @@ void D::y() {} // OGCG-SAME: [4 x ptr] [ptr inttoptr (i64 24 to ptr), ptr null, ptr null, ptr @_ZN1C1xEv], // OGCG-SAME: [4 x ptr] [ptr null, ptr inttoptr (i64 -24 to ptr), ptr null, ptr @_ZN1A1vEv] // OGCG-SAME: }, align 8 + +D::D() {} + +// In CIR, this gets emitted after the B and C constructors. See below. +// Base (C2) constructor for D + +// OGCG: define {{.*}} void @_ZN1DC2Ev(ptr {{.*}} %[[THIS_ARG:.*]], ptr {{.*}} %[[VTT_ARG:.*]]) +// OGCG: %[[THIS_ADDR:.*]] = alloca ptr +// OGCG: %[[VTT_ADDR:.*]] = alloca ptr +// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] +// OGCG: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]] +// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] +// OGCG: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]] +// OGCG: %[[B_VTT:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 1 +// OGCG: call void @_ZN1BC2Ev(ptr {{.*}} %[[THIS]], ptr {{.*}} %[[B_VTT]]) +// OGCG: %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16 +// OGCG: %[[C_VTT:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 3 +// OGCG: call void @_ZN1CC2Ev(ptr {{.*}} %[[C_ADDR]], ptr {{.*}} %[[C_VTT]]) +// OGCG: %[[VPTR:.*]] = load ptr, ptr %[[VTT]] +// OGCG: store ptr %[[VPTR]], ptr %[[THIS]] +// OGCG: %[[D_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 5 +// OGCG: %[[D_VPTR:.*]] = load ptr, ptr %[[D_VPTR_ADDR]] +// OGCG: %[[D_VPTR_ADDR2:.*]] = load ptr, ptr %[[THIS]] +// OGCG: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[D_VPTR_ADDR2]], i64 -24 +// OGCG: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]] +// OGCG: %[[BASE_PTR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]] +// OGCG: store ptr %[[D_VPTR]], ptr %[[BASE_PTR]] +// OGCG: %[[C_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 6 +// OGCG: %[[C_VPTR:.*]] = load ptr, ptr %[[C_VPTR_ADDR]] +// OGCG: %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16 +// OGCG: store ptr %[[C_VPTR]], ptr %[[C_ADDR]] + + +// Base (C2) constructor for B + +// CIR: cir.func {{.*}} @_ZN1BC2Ev +// CIR-SAME: %[[THIS_ARG:.*]]: !cir.ptr<!rec_B> +// CIR-SAME: %[[VTT_ARG:.*]]: !cir.ptr<!cir.ptr<!void>> +// CIR: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init] +// CIR: %[[VTT_ADDR:.*]] = cir.alloca {{.*}} ["vtt", init] +// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]] +// CIR: cir.store %[[VTT_ARG]], %[[VTT_ADDR]] +// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]] +// CIR: %[[VTT:.*]] = cir.load{{.*}} %[[VTT_ADDR]] +// CIR: %[[VTT_ADDR_POINT:.*]] = 
cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 0 -> !cir.ptr<!cir.ptr<!void>> +// CIR: %[[VPTR_ADDR:.*]] = cir.cast(bitcast, %[[VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr> +// CIR: %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_ADDR]] +// CIR: %[[B_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] +// CIR: cir.store{{.*}} %[[VPTR]], %[[B_VPTR_ADDR]] +// CIR: %[[B_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 1 -> !cir.ptr<!cir.ptr<!void>> +// CIR: %[[B_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[B_VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr> +// CIR: %[[B_VPTR:.*]] = cir.load{{.*}} %[[B_VPTR_ADDR]] +// CIR: %[[B_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] +// CIR: %[[VPTR:.*]] = cir.load{{.*}} %[[B_VPTR_ADDR]] +// CIR: %[[VPTR_ADDR2:.*]] = cir.cast(bitcast, %[[VPTR]] : !cir.vptr), !cir.ptr<!u8i> +// CIR: %[[CONST_24:.*]] = cir.const #cir.int<-24> +// CIR: %[[BASE_OFFSET_ADDR:.*]] = cir.ptr_stride(%[[VPTR_ADDR2]] : !cir.ptr<!u8i>, %[[CONST_24]] : !s64i), !cir.ptr<!u8i> +// CIR: %[[BASE_OFFSET_PTR:.*]] = cir.cast(bitcast, %[[BASE_OFFSET_ADDR]] : !cir.ptr<!u8i>), !cir.ptr<!s64i> +// CIR: %[[BASE_OFFSET:.*]] = cir.load{{.*}} %[[BASE_OFFSET_PTR]] : !cir.ptr<!s64i>, !s64i +// CIR: %[[THIS_PTR:.*]] = cir.cast(bitcast, %[[THIS]] : !cir.ptr<!rec_B>), !cir.ptr<!u8i> +// CIR: %[[BASE_PTR:.*]] = cir.ptr_stride(%[[THIS_PTR]] : !cir.ptr<!u8i>, %[[BASE_OFFSET]] : !s64i), !cir.ptr<!u8i> +// CIR: %[[BASE_CAST:.*]] = cir.cast(bitcast, %[[BASE_PTR]] : !cir.ptr<!u8i>), !cir.ptr<!rec_B> +// CIR: %[[BASE_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[BASE_CAST]] +// CIR: cir.store{{.*}} %[[B_VPTR]], %[[BASE_VPTR_ADDR]] + +// LLVM: define {{.*}} void @_ZN1BC2Ev(ptr %[[THIS_ARG:.*]], ptr %[[VTT_ARG:.*]]) +// LLVM: %[[THIS_ADDR:.*]] = alloca ptr +// LLVM: %[[VTT_ADDR:.*]] = alloca ptr +// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] +// LLVM: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]] +// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] +// LLVM: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]] +// LLVM: %[[VPTR:.*]] = load ptr, ptr %[[VTT]] +// LLVM: store ptr %[[VPTR]], ptr %[[THIS]] +// LLVM: %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 1 +// LLVM: %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]] +// LLVM: %[[VPTR:.*]] = load ptr, ptr %[[THIS]] +// LLVM: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24 +// LLVM: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]] +// LLVM: %[[BASE_PTR:.*]] = getelementptr i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]] +// LLVM: store ptr %[[B_VPTR]], ptr %[[BASE_PTR]] + +// OGCG: define {{.*}} void @_ZN1BC2Ev(ptr {{.*}} %[[THIS_ARG:.*]], ptr {{.*}} %[[VTT_ARG:.*]]) +// OGCG: %[[THIS_ADDR:.*]] = alloca ptr +// OGCG: %[[VTT_ADDR:.*]] = alloca ptr +// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] +// OGCG: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]] +// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] +// OGCG: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]] +// OGCG: %[[VPTR:.*]] = load ptr, ptr %[[VTT]] +// OGCG: store ptr %[[VPTR]], ptr %[[THIS]] +// OGCG: %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 1 +// OGCG: %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]] +// OGCG: %[[VPTR:.*]] = load ptr, ptr %[[THIS]] +// OGCG: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24 +// OGCG: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]] +// OGCG: %[[BASE_PTR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 
%[[BASE_OFFSET]] +// OGCG: store ptr %[[B_VPTR]], ptr %[[BASE_PTR]] + +// Base (C2) constructor for C + +// CIR: cir.func {{.*}} @_ZN1CC2Ev +// CIR-SAME: %[[THIS_ARG:.*]]: !cir.ptr<!rec_C> +// CIR-SAME: %[[VTT_ARG:.*]]: !cir.ptr<!cir.ptr<!void>> +// CIR: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init] +// CIR: %[[VTT_ADDR:.*]] = cir.alloca {{.*}} ["vtt", init] +// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]] +// CIR: cir.store %[[VTT_ARG]], %[[VTT_ADDR]] +// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]] +// CIR: %[[VTT:.*]] = cir.load{{.*}} %[[VTT_ADDR]] +// CIR: %[[VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 0 -> !cir.ptr<!cir.ptr<!void>> +// CIR: %[[VPTR_ADDR:.*]] = cir.cast(bitcast, %[[VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr> +// CIR: %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_ADDR]] +// CIR: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] +// CIR: cir.store{{.*}} %[[VPTR]], %[[C_VPTR_ADDR]] +// CIR: %[[C_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 1 -> !cir.ptr<!cir.ptr<!void>> +// CIR: %[[C_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[C_VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr> +// CIR: %[[C_VPTR:.*]] = cir.load{{.*}} %[[C_VPTR_ADDR]] +// CIR: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] +// CIR: %[[VPTR:.*]] = cir.load{{.*}} %[[C_VPTR_ADDR]] +// CIR: %[[VPTR_ADDR2:.*]] = cir.cast(bitcast, %[[VPTR]] : !cir.vptr), !cir.ptr<!u8i> +// CIR: %[[CONST_24:.*]] = cir.const #cir.int<-24> +// CIR: %[[BASE_OFFSET_ADDR:.*]] = cir.ptr_stride(%[[VPTR_ADDR2]] : !cir.ptr<!u8i>, %[[CONST_24]] : !s64i), !cir.ptr<!u8i> +// CIR: %[[BASE_OFFSET_PTR:.*]] = cir.cast(bitcast, %[[BASE_OFFSET_ADDR]] : !cir.ptr<!u8i>), !cir.ptr<!s64i> +// CIR: %[[BASE_OFFSET:.*]] = cir.load{{.*}} %[[BASE_OFFSET_PTR]] : !cir.ptr<!s64i>, !s64i +// CIR: %[[THIS_PTR:.*]] = cir.cast(bitcast, %[[THIS]] : !cir.ptr<!rec_C>), !cir.ptr<!u8i> +// CIR: %[[BASE_PTR:.*]] = cir.ptr_stride(%[[THIS_PTR]] : !cir.ptr<!u8i>, %[[BASE_OFFSET]] : !s64i), !cir.ptr<!u8i> +// CIR: %[[BASE_CAST:.*]] = cir.cast(bitcast, %[[BASE_PTR]] : !cir.ptr<!u8i>), !cir.ptr<!rec_C> +// CIR: %[[BASE_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[BASE_CAST]] +// CIR: cir.store{{.*}} %[[C_VPTR]], %[[BASE_VPTR_ADDR]] + +// LLVM: define {{.*}} void @_ZN1CC2Ev(ptr %[[THIS_ARG:.*]], ptr %[[VTT_ARG:.*]]) +// LLVM: %[[THIS_ADDR:.*]] = alloca ptr +// LLVM: %[[VTT_ADDR:.*]] = alloca ptr +// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] +// LLVM: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]] +// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] +// LLVM: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]] +// LLVM: %[[VPTR:.*]] = load ptr, ptr %[[VTT]] +// LLVM: store ptr %[[VPTR]], ptr %[[THIS]] +// LLVM: %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 1 +// LLVM: %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]] +// LLVM: %[[VPTR:.*]] = load ptr, ptr %[[THIS]] +// LLVM: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24 +// LLVM: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]] +// LLVM: %[[BASE_PTR:.*]] = getelementptr i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]] +// LLVM: store ptr %[[B_VPTR]], ptr %[[BASE_PTR]] + +// OGCG: define {{.*}} void @_ZN1CC2Ev(ptr {{.*}} %[[THIS_ARG:.*]], ptr {{.*}} %[[VTT_ARG:.*]]) +// OGCG: %[[THIS_ADDR:.*]] = alloca ptr +// OGCG: %[[VTT_ADDR:.*]] = alloca ptr +// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] +// OGCG: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]] +// OGCG: 
%[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] +// OGCG: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]] +// OGCG: %[[VPTR:.*]] = load ptr, ptr %[[VTT]] +// OGCG: store ptr %[[VPTR]], ptr %[[THIS]] +// OGCG: %[[B_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i64 1 +// OGCG: %[[B_VPTR:.*]] = load ptr, ptr %[[B_VPTR_ADDR]] +// OGCG: %[[VPTR:.*]] = load ptr, ptr %[[THIS]] +// OGCG: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[VPTR]], i64 -24 +// OGCG: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]] +// OGCG: %[[BASE_PTR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]] +// OGCG: store ptr %[[B_VPTR]], ptr %[[BASE_PTR]] + +// Base (C2) constructor for D + +// CIR: cir.func {{.*}} @_ZN1DC2Ev +// CIR-SAME: %[[THIS_ARG:.*]]: !cir.ptr<!rec_D> +// CIR-SAME: %[[VTT_ARG:.*]]: !cir.ptr<!cir.ptr<!void>> +// CIR: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init] +// CIR: %[[VTT_ADDR:.*]] = cir.alloca {{.*}} ["vtt", init] +// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]] +// CIR: cir.store %[[VTT_ARG]], %[[VTT_ADDR]] +// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]] +// CIR: %[[VTT:.*]] = cir.load{{.*}} %[[VTT_ADDR]] +// CIR: %[[B_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_D> nonnull [0] -> !cir.ptr<!rec_B> +// CIR: %[[B_VTT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 1 -> !cir.ptr<!cir.ptr<!void>> +// CIR: cir.call @_ZN1BC2Ev(%[[B_ADDR]], %[[B_VTT]]) nothrow : (!cir.ptr<!rec_B>, !cir.ptr<!cir.ptr<!void>>) -> () +// CIR: %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_D> nonnull [16] -> !cir.ptr<!rec_C> +// CIR: %[[C_VTT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 3 -> !cir.ptr<!cir.ptr<!void>> +// CIR: cir.call @_ZN1CC2Ev(%[[C_ADDR]], %[[C_VTT]]) nothrow : (!cir.ptr<!rec_C>, !cir.ptr<!cir.ptr<!void>>) -> () +// CIR: %[[D_VTT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 0 -> !cir.ptr<!cir.ptr<!void>> +// CIR: %[[VPTR_ADDR:.*]] = cir.cast(bitcast, %[[D_VTT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr> +// CIR: %[[VPTR:.*]] = cir.load{{.*}} %[[VPTR_ADDR]] : !cir.ptr<!cir.vptr>, !cir.vptr +// CIR: %[[D_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] +// CIR: cir.store{{.*}} %[[VPTR]], %[[D_VPTR_ADDR]] +// CIR: %[[D_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 5 -> !cir.ptr<!cir.ptr<!void>> +// CIR: %[[D_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[D_VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr> +// CIR: %[[D_VPTR:.*]] = cir.load{{.*}} %[[D_VPTR_ADDR]] : !cir.ptr<!cir.vptr>, !cir.vptr +// CIR: %[[D_VPTR_ADDR2:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr<!rec_D> -> !cir.ptr<!cir.vptr> +// CIR: %[[VPTR2:.*]] = cir.load{{.*}} %[[D_VPTR_ADDR2]] : !cir.ptr<!cir.vptr>, !cir.vptr +// CIR: %[[VPTR_ADDR2:.*]] = cir.cast(bitcast, %[[VPTR2]] : !cir.vptr), !cir.ptr<!u8i> +// CIR: %[[CONST_24:.*]] = cir.const #cir.int<-24> : !s64i +// CIR: %[[BASE_OFFSET_ADDR:.*]] = cir.ptr_stride(%[[VPTR_ADDR2]] : !cir.ptr<!u8i>, %[[CONST_24]] : !s64i), !cir.ptr<!u8i> +// CIR: %[[BASE_OFFSET_PTR:.*]] = cir.cast(bitcast, %[[BASE_OFFSET_ADDR]] : !cir.ptr<!u8i>), !cir.ptr<!s64i> +// CIR: %[[BASE_OFFSET:.*]] = cir.load{{.*}} %[[BASE_OFFSET_PTR]] : !cir.ptr<!s64i>, !s64i +// CIR: %[[THIS_PTR:.*]] = cir.cast(bitcast, %[[THIS]] : !cir.ptr<!rec_D>), !cir.ptr<!u8i> +// CIR: %[[BASE_PTR:.*]] = cir.ptr_stride(%[[THIS_PTR]] : !cir.ptr<!u8i>, %[[BASE_OFFSET]] : !s64i), !cir.ptr<!u8i> +// CIR: %[[BASE_CAST:.*]] = 
cir.cast(bitcast, %[[BASE_PTR]] : !cir.ptr<!u8i>), !cir.ptr<!rec_D> +// CIR: %[[BASE_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[BASE_CAST]] +// CIR: cir.store{{.*}} %[[D_VPTR]], %[[BASE_VPTR_ADDR]] +// CIR: %[[C_VTT_ADDR_POINT:.*]] = cir.vtt.address_point %[[VTT]] : !cir.ptr<!cir.ptr<!void>>, offset = 6 -> !cir.ptr<!cir.ptr<!void>> +// CIR: %[[C_VPTR_ADDR:.*]] = cir.cast(bitcast, %[[C_VTT_ADDR_POINT]] : !cir.ptr<!cir.ptr<!void>>), !cir.ptr<!cir.vptr> +// CIR: %[[C_VPTR:.*]] = cir.load{{.*}} %[[C_VPTR_ADDR]] : !cir.ptr<!cir.vptr>, !cir.vptr +// CIR: %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_D> nonnull [16] -> !cir.ptr<!rec_C> +// CIR: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[C_ADDR]] : !cir.ptr<!rec_C> -> !cir.ptr<!cir.vptr> +// CIR: cir.store{{.*}} %[[C_VPTR]], %[[C_VPTR_ADDR]] : !cir.vptr, !cir.ptr<!cir.vptr> + +// LLVM: define {{.*}} void @_ZN1DC2Ev(ptr %[[THIS_ARG:.*]], ptr %[[VTT_ARG:.*]]) { +// LLVM: %[[THIS_ADDR:.*]] = alloca ptr +// LLVM: %[[VTT_ADDR:.*]] = alloca ptr +// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] +// LLVM: store ptr %[[VTT_ARG]], ptr %[[VTT_ADDR]] +// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] +// LLVM: %[[VTT:.*]] = load ptr, ptr %[[VTT_ADDR]] +// LLVM: %[[B_VTT:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 1 +// LLVM: call void @_ZN1BC2Ev(ptr %[[THIS]], ptr %[[B_VTT]]) +// LLVM: %[[C_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 16 +// LLVM: %[[C_VTT:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 3 +// LLVM: call void @_ZN1CC2Ev(ptr %[[C_ADDR]], ptr %[[C_VTT]]) +// LLVM: %[[VPTR:.*]] = load ptr, ptr %[[VTT]] +// LLVM: store ptr %[[VPTR]], ptr %[[THIS]] +// LLVM: %[[D_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 5 +// LLVM: %[[D_VPTR:.*]] = load ptr, ptr %[[D_VPTR_ADDR]] +// LLVM: %[[D_VPTR_ADDR2:.*]] = load ptr, ptr %[[THIS]] +// LLVM: %[[BASE_OFFSET_ADDR:.*]] = getelementptr i8, ptr %[[D_VPTR_ADDR2]], i64 -24 +// LLVM: %[[BASE_OFFSET:.*]] = load i64, ptr %[[BASE_OFFSET_ADDR]] +// LLVM: %[[BASE_PTR:.*]] = getelementptr i8, ptr %[[THIS]], i64 %[[BASE_OFFSET]] +// LLVM: store ptr %[[D_VPTR]], ptr %[[BASE_PTR]] +// LLVM: %[[C_VPTR_ADDR:.*]] = getelementptr inbounds ptr, ptr %[[VTT]], i32 6 +// LLVM: %[[C_VPTR:.*]] = load ptr, ptr %[[C_VPTR_ADDR]] +// LLVM: %[[C_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 16 +// LLVM: store ptr %[[C_VPTR]], ptr %[[C_ADDR]] + +// The C2 constructor for D gets emitted earlier in OGCG, see above. + +// Base (C2) constructor for A + +// CIR: cir.func {{.*}} @_ZN1AC2Ev +// CIR-SAME: %[[THIS_ARG:.*]]: !cir.ptr<!rec_A> +// CIR: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init] +// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]] +// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]] +// CIR: %[[VPTR:.*]] = cir.vtable.address_point(@_ZTV1A, address_point = <index = 0, offset = 2>) : !cir.vptr +// CIR: %[[VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr<!rec_A> -> !cir.ptr<!cir.vptr> +// CIR: cir.store{{.*}} %[[VPTR]], %[[VPTR_ADDR]] : !cir.vptr, !cir.ptr<!cir.vptr> + +// LLVM: define {{.*}} void @_ZN1AC2Ev(ptr %[[THIS_ARG:.*]]) { +// LLVM: %[[THIS_ADDR:.*]] = alloca ptr, i64 1, align 8 +// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]], align 8 +// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8 +// LLVM: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1A, i64 16), ptr %[[THIS]] + +// The C2 constructor for A gets emitted later in OGCG, see below. 
+ +// Complete (C1) constructor for D + +// CIR: cir.func {{.*}} @_ZN1DC1Ev +// CIR-SAME: %[[THIS_ARG:.*]]: !cir.ptr<!rec_D> +// CIR: %[[THIS_ADDR:.*]] = cir.alloca {{.*}} ["this", init] +// CIR: cir.store %[[THIS_ARG]], %[[THIS_ADDR]] +// CIR: %[[THIS:.*]] = cir.load %[[THIS_ADDR]] +// CIR: %[[A_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_D> nonnull [40] -> !cir.ptr<!rec_A> +// CIR: cir.call @_ZN1AC2Ev(%[[A_ADDR]]) nothrow : (!cir.ptr<!rec_A>) -> () +// CIR: %[[B_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_D> nonnull [0] -> !cir.ptr<!rec_B> +// CIR: %[[B_VTT:.*]] = cir.vtt.address_point @_ZTT1D, offset = 1 -> !cir.ptr<!cir.ptr<!void>> +// CIR: cir.call @_ZN1BC2Ev(%[[B_ADDR]], %[[B_VTT]]) nothrow : (!cir.ptr<!rec_B>, !cir.ptr<!cir.ptr<!void>>) -> () +// CIR: %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_D> nonnull [16] -> !cir.ptr<!rec_C> +// CIR: %[[C_VTT:.*]] = cir.vtt.address_point @_ZTT1D, offset = 3 -> !cir.ptr<!cir.ptr<!void>> +// CIR: cir.call @_ZN1CC2Ev(%[[C_ADDR]], %[[C_VTT]]) nothrow : (!cir.ptr<!rec_C>, !cir.ptr<!cir.ptr<!void>>) -> () +// CIR: %[[D_VPTR:.*]] = cir.vtable.address_point(@_ZTV1D, address_point = <index = 0, offset = 3>) : !cir.vptr +// CIR: %[[VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[THIS]] : !cir.ptr<!rec_D> -> !cir.ptr<!cir.vptr> +// CIR: cir.store{{.*}} %[[D_VPTR]], %[[VPTR_ADDR]] : !cir.vptr, !cir.ptr<!cir.vptr> +// CIR: %[[A_VPTR:.*]] = cir.vtable.address_point(@_ZTV1D, address_point = <index = 2, offset = 3>) : !cir.vptr +// CIR: %[[A_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_D> nonnull [40] -> !cir.ptr<!rec_A> +// CIR: %[[A_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[A_ADDR]] : !cir.ptr<!rec_A> -> !cir.ptr<!cir.vptr> +// CIR: cir.store{{.*}} %[[A_VPTR]], %[[A_VPTR_ADDR]] : !cir.vptr, !cir.ptr<!cir.vptr> +// CIR: %[[C_VPTR:.*]] = cir.vtable.address_point(@_ZTV1D, address_point = <index = 1, offset = 3>) : !cir.vptr +// CIR: %[[C_ADDR:.*]] = cir.base_class_addr %[[THIS]] : !cir.ptr<!rec_D> nonnull [16] -> !cir.ptr<!rec_C> +// CIR: %[[C_VPTR_ADDR:.*]] = cir.vtable.get_vptr %[[C_ADDR]] : !cir.ptr<!rec_C> -> !cir.ptr<!cir.vptr> +// CIR: cir.store{{.*}} %[[C_VPTR]], %[[C_VPTR_ADDR]] : !cir.vptr, !cir.ptr<!cir.vptr> + +// LLVM: define {{.*}} void @_ZN1DC1Ev(ptr %[[THIS_ARG:.*]]) +// LLVM: %[[THIS_ADDR:.*]] = alloca ptr +// LLVM: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] +// LLVM: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] +// LLVM: %[[A_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 40 +// LLVM: call void @_ZN1AC2Ev(ptr %[[A_ADDR]]) +// LLVM: call void @_ZN1BC2Ev(ptr %[[THIS]], ptr getelementptr inbounds nuw (i8, ptr @_ZTT1D, i64 8)) +// LLVM: %[[C_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 16 +// LLVM: call void @_ZN1CC2Ev(ptr %[[C_ADDR]], ptr getelementptr inbounds nuw (i8, ptr @_ZTT1D, i64 24)) +// LLVM: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 24), ptr %[[THIS]] +// LLVM: %[[A_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 40 +// LLVM: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 96), ptr %[[A_ADDR]] +// LLVM: %[[C_ADDR:.*]] = getelementptr i8, ptr %[[THIS]], i32 16 +// LLVM: store ptr getelementptr inbounds nuw (i8, ptr @_ZTV1D, i64 64), ptr %[[C_ADDR]] + +// OGCG: define {{.*}} void @_ZN1DC1Ev(ptr {{.*}} %[[THIS_ARG:.*]]) +// OGCG: %[[THIS_ADDR:.*]] = alloca ptr +// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] +// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] +// OGCG: %[[A_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 40 +// OGCG: call 
void @_ZN1AC2Ev(ptr {{.*}} %[[A_ADDR]]) +// OGCG: call void @_ZN1BC2Ev(ptr {{.*}} %[[THIS]], ptr {{.*}} getelementptr inbounds ([7 x ptr], ptr @_ZTT1D, i64 0, i64 1)) +// OGCG: %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16 +// OGCG: call void @_ZN1CC2Ev(ptr {{.*}} %[[C_ADDR]], ptr {{.*}} getelementptr inbounds ([7 x ptr], ptr @_ZTT1D, i64 0, i64 3)) +// OGCG: store ptr getelementptr inbounds inrange(-24, 16) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 0, i32 3), ptr %[[THIS]] +// OGCG: %[[A_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 40 +// OGCG: store ptr getelementptr inbounds inrange(-24, 8) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 2, i32 3), ptr %[[A_ADDR]] +// OGCG: %[[C_ADDR:.*]] = getelementptr inbounds i8, ptr %[[THIS]], i64 16 +// OGCG: store ptr getelementptr inbounds inrange(-24, 8) ({ [5 x ptr], [4 x ptr], [4 x ptr] }, ptr @_ZTV1D, i32 0, i32 1, i32 3), ptr %[[C_ADDR]] + +// OGCG: define {{.*}} void @_ZN1AC2Ev(ptr {{.*}} %[[THIS_ARG:.*]]) +// OGCG: %[[THIS_ADDR:.*]] = alloca ptr +// OGCG: store ptr %[[THIS_ARG]], ptr %[[THIS_ADDR]] +// OGCG: %[[THIS:.*]] = load ptr, ptr %[[THIS_ADDR]] +// OGCG: store ptr getelementptr inbounds inrange(-16, 8) ({ [3 x ptr] }, ptr @_ZTV1A, i32 0, i32 0, i32 2), ptr %[[THIS]] diff --git a/clang/test/CIR/Lowering/vtt-addrpoint.cir b/clang/test/CIR/Lowering/vtt-addrpoint.cir index a3e7271f7446e..96dc27d991cd4 100644 --- a/clang/test/CIR/Lowering/vtt-addrpoint.cir +++ b/clang/test/CIR/Lowering/vtt-addrpoint.cir @@ -32,7 +32,7 @@ module { } // CHECK: define{{.*}} void @_ZN1CC2Ev -// CHECK: %[[VTT:.*]] = getelementptr inbounds i8, ptr %{{.*}}, i32 1 +// CHECK: %[[VTT:.*]] = getelementptr inbounds ptr, ptr %{{.*}}, i32 1 // CHECK: call void @_ZN1BC2Ev(ptr %{{.*}}, ptr %[[VTT]]) cir.global linkonce_odr dso_local @_ZTV1C = #cir.vtable<{#cir.const_array<[#cir.ptr<null> : !cir.ptr<!u8i>, #cir.ptr<null> : !cir.ptr<!u8i>, #cir.ptr<null> : !cir.ptr<!u8i>]> : !cir.array<!cir.ptr<!u8i> x 3>}> : !rec_anon_struct {alignment = 8 : i64} _______________________________________________ cfe-commits mailing list cfe-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits