https://github.com/vasu-the-sharma created https://github.com/llvm/llvm-project/pull/175478
>From f10bbba299bfcda6ac69af7aa3b7e11107484d72 Mon Sep 17 00:00:00 2001
From: vasu-ibm <[email protected]>
Date: Thu, 8 Jan 2026 11:49:39 -0500
Subject: [PATCH 1/2] add coverage ubsan-aggregate-null-align.c

---
 .../test/CodeGen/ubsan-aggregate-null-align.c | 48 +++++++++++++++++++
 1 file changed, 48 insertions(+)
 create mode 100644 clang/test/CodeGen/ubsan-aggregate-null-align.c

diff --git a/clang/test/CodeGen/ubsan-aggregate-null-align.c b/clang/test/CodeGen/ubsan-aggregate-null-align.c
new file mode 100644
index 0000000000000..7ca9d32c3305b
--- /dev/null
+++ b/clang/test/CodeGen/ubsan-aggregate-null-align.c
@@ -0,0 +1,48 @@
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -o - \
+// RUN:   -fsanitize=null,alignment | FileCheck %s --check-prefix=CHECK-SANITIZE
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -o - \
+// RUN:   | FileCheck %s --check-prefix=CHECK-NO-SANITIZE
+
+struct Small { int x; };
+struct Container { struct Small inner; };
+
+// CHECK-SANITIZE-LABEL: define {{.*}}void @test_direct_assign_ptr(
+// CHECK-SANITIZE: %[[D:.*]] = load ptr, ptr %dest.addr
+// CHECK-SANITIZE: %[[S:.*]] = load ptr, ptr %src.addr
+// CHECK-SANITIZE: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[D]], ptr align 4 %[[S]], i64 4, i1 false)
+//
+// CHECK-NO-SANITIZE-LABEL: define {{.*}}void @test_direct_assign_ptr(
+// CHECK-NO-SANITIZE-NOT: @__ubsan_handle_type_mismatch
+void test_direct_assign_ptr(struct Small *dest, struct Small *src) {
+  *dest = *src;
+}
+
+// CHECK-SANITIZE-LABEL: define {{.*}}void @test_null_dest(
+// CHECK-SANITIZE: %[[D:.*]] = load ptr, ptr %dest
+// CHECK-SANITIZE: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %[[D]], ptr {{.*}}, i64 4, i1 false)
+//
+// CHECK-NO-SANITIZE-LABEL: define {{.*}}void @test_null_dest(
+// CHECK-NO-SANITIZE-NOT: @__ubsan_handle_type_mismatch
+void test_null_dest(struct Small *src) {
+  struct Small *dest = 0;
+  *dest = *src;
+}
+
+// CHECK-SANITIZE-LABEL: define {{.*}}void @test_nested_struct(
+// CHECK-SANITIZE: %[[VAL1:.*]] = icmp ne ptr %[[C:.*]], null
+// CHECK-SANITIZE: br i1 %{{.*}}, label %cont, label %handler.type_mismatch
+//
+// CHECK-NO-SANITIZE-LABEL: define {{.*}}void @test_nested_struct(
+// CHECK-NO-SANITIZE-NOT: @__ubsan_handle_type_mismatch
+void test_nested_struct(struct Container *c, struct Small *s) {
+  c->inner = *s;
+}
+
+// CHECK-SANITIZE-LABEL: define {{.*}}void @test_comma_operator(
+// CHECK-SANITIZE: call void @llvm.memcpy.p0.p0.i64(ptr align 4 %{{.*}}, ptr align 4 %{{.*}}, i64 4, i1 false)
+//
+// CHECK-NO-SANITIZE-LABEL: define {{.*}}void @test_comma_operator(
+// CHECK-NO-SANITIZE-NOT: @__ubsan_handle_type_mismatch
+void test_comma_operator(struct Small *dest, struct Small *src) {
+  *dest = (0, *src);
+}

>From 52a66575e018e3a5fe2467858c84a94b3af9390d Mon Sep 17 00:00:00 2001
From: vasu-ibm <[email protected]>
Date: Sun, 11 Jan 2026 21:18:28 -0500
Subject: [PATCH 2/2] update and resolve clang-format errors

---
 clang/lib/CodeGen/CGExprAgg.cpp | 179 ++++++------
 clang/lib/CodeGen/CGExprCXX.cpp | 487 +++++++++++++++----------------
 2 files changed, 314 insertions(+), 352 deletions(-)

diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp
index 7cc4d6c8f06f6..28136313a0ca4 100644
--- a/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/clang/lib/CodeGen/CGExprAgg.cpp
@@ -50,11 +50,13 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
   bool IsResultUnused;
 
   AggValueSlot EnsureSlot(QualType T) {
-    if (!Dest.isIgnored()) return Dest;
+    if (!Dest.isIgnored())
+      return Dest;
     return CGF.CreateAggTemp(T, "agg.tmp.ensured");
   }
   void EnsureDest(QualType T) {
-    if (!Dest.isIgnored()) return;
+    if (!Dest.isIgnored())
+      return;
     Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
   }
 
@@ -72,8 +74,8 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
 public:
   AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused)
-    : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
-      IsResultUnused(IsResultUnused) { }
+      : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
+        IsResultUnused(IsResultUnused) {}
 
   //===--------------------------------------------------------------------===//
   //                               Utilities
@@ -114,9 +116,7 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
     StmtVisitor<AggExprEmitter>::Visit(E);
   }
 
-  void VisitStmt(Stmt *S) {
-    CGF.ErrorUnsupported(S, "aggregate expression");
-  }
+  void VisitStmt(Stmt *S) { CGF.ErrorUnsupported(S, "aggregate expression"); }
   void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
   void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
     Visit(GE->getResultExpr());
@@ -157,9 +157,7 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
   void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
     EmitAggLoadOfLValue(E);
   }
-  void VisitPredefinedExpr(const PredefinedExpr *E) {
-    EmitAggLoadOfLValue(E);
-  }
+  void VisitPredefinedExpr(const PredefinedExpr *E) { EmitAggLoadOfLValue(E); }
 
   // Operators.
   void VisitCastExpr(CastExpr *E);
@@ -175,9 +173,7 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
   }
 
   void VisitObjCMessageExpr(ObjCMessageExpr *E);
-  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
-    EmitAggLoadOfLValue(E);
-  }
+  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) { EmitAggLoadOfLValue(E); }
 
   void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E);
   void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
@@ -189,7 +185,7 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
   void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
                               llvm::Value *outerBegin = nullptr);
   void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
-  void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
+  void VisitNoInitExpr(NoInitExpr *E) {} // Do nothing.
   void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
     CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
     Visit(DAE->getExpr());
@@ -244,7 +240,7 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
     Visit(E->getSelectedExpr());
   }
 };
-}  // end anonymous namespace.
+} // end anonymous namespace.
 
 //===----------------------------------------------------------------------===//
 //                                Utilities
@@ -393,10 +389,8 @@ void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
   if (dest.requiresGCollection()) {
     CharUnits sz = dest.getPreferredSize(CGF.getContext(), type);
     llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
-    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
-                                                      dest.getAddress(),
-                                                      src.getAddress(),
-                                                      size);
+    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, dest.getAddress(),
+                                                      src.getAddress(), size);
     return;
   }
 
@@ -411,8 +405,8 @@ void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
 
 /// Emit the initializer for a std::initializer_list initialized with a
 /// real initializer list.
-void
-AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
+void AggExprEmitter::VisitCXXStdInitializerListExpr(
+    CXXStdInitializerListExpr *E) {
   // Emit an array containing the elements. The array is externally destructed
   // if the std::initializer_list object is.
   ASTContext &Ctx = CGF.getContext();
@@ -454,7 +448,7 @@ AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
                                              ArrayType->getElementType()) &&
          "Expected std::initializer_list second field to be const E *");
   llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
-  llvm::Value *IdxEnd[] = { Zero, Size };
+  llvm::Value *IdxEnd[] = {Zero, Size};
   llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP(
       ArrayPtr.getElementType(), ArrayPtr.emitRawPointer(CGF), IdxEnd,
       "arrayend");
@@ -571,7 +565,7 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
       CGF.getContext().getAsArrayType(ArrayQTy)->getElementType();
   CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
   CharUnits elementAlign =
-    DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
+      DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
   llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);
 
   // Consider initializing the array by copying from a global. For this to be
@@ -686,7 +680,8 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
         llvmElementType, element,
         llvm::ConstantInt::get(CGF.SizeTy, NumInitElements),
         "arrayinit.start");
-    if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
+    if (endOfInit.isValid())
+      Builder.CreateStore(element, endOfInit);
   }
 
   // Compute the end of the array.
@@ -700,7 +695,7 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
   // Jump into the body.
   CGF.EmitBlock(bodyBB);
   llvm::PHINode *currentElement =
-    Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
+      Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
   currentElement->addIncoming(element, entryBB);
 
   // Emit the actual filler expression.
@@ -724,11 +719,12 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
       llvmElementType, currentElement, one, "arrayinit.next");
 
   // Tell the EH cleanup that we finished with the last element.
-  if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);
+  if (endOfInit.isValid())
+    Builder.CreateStore(nextElement, endOfInit);
 
   // Leave the loop if we're done.
-  llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
-                                           "arrayinit.done");
+  llvm::Value *done =
+      Builder.CreateICmpEQ(nextElement, end, "arrayinit.done");
   llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
   Builder.CreateCondBr(done, endBB, bodyBB);
   currentElement->addIncoming(nextElement, Builder.GetInsertBlock());
@@ -741,7 +737,8 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
 //                            Visitor Methods
 //===----------------------------------------------------------------------===//
 
-void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
+void AggExprEmitter::VisitMaterializeTemporaryExpr(
+    MaterializeTemporaryExpr *E) {
   Visit(E->getSubExpr());
 }
 
@@ -753,8 +750,7 @@ void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
   EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e));
 }
 
-void
-AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+void AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
   if (Dest.isPotentiallyAliased()) {
     // Just emit a load of the lvalue + a copy, because our compound literal
     // might alias the destination.
@@ -798,8 +794,8 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
   case CK_Dynamic: {
     // FIXME: Can this actually happen? We have no test coverage for it.
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
-    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
-                                      CodeGenFunction::TCK_Load);
+    LValue LV =
+        CGF.EmitCheckedLValue(E->getSubExpr(), CodeGenFunction::TCK_Load);
     // FIXME: Do we also need to handle property references here?
     if (LV.isSimple())
       CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
@@ -848,7 +844,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
   case CK_BaseToDerived:
   case CK_UncheckedDerivedToBase: {
     llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
-                    "should have been unpacked before we got here");
+                     "should have been unpacked before we got here");
   }
 
   case CK_NonAtomicToAtomic:
@@ -858,11 +854,12 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
     // Determine the atomic and value types.
    QualType atomicType = E->getSubExpr()->getType();
     QualType valueType = E->getType();
-    if (isToAtomic) std::swap(atomicType, valueType);
+    if (isToAtomic)
+      std::swap(atomicType, valueType);
 
     assert(atomicType->isAtomicType());
-    assert(CGF.getContext().hasSameUnqualifiedType(valueType,
-                  atomicType->castAs<AtomicType>()->getValueType()));
+    assert(CGF.getContext().hasSameUnqualifiedType(
+        valueType, atomicType->castAs<AtomicType>()->getValueType()));
 
     // Just recurse normally if we're ignoring the result or the
     // atomic type doesn't change representation.
@@ -871,14 +868,14 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
     }
 
     CastKind peepholeTarget =
-      (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);
+        (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);
 
     // These two cases are reverses of each other; try to peephole them.
     if (Expr *op =
            findPeephole(E->getSubExpr(), peepholeTarget, CGF.getContext())) {
       assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
                                                      E->getType()) &&
-                 "peephole significantly changed types?");
+             "peephole significantly changed types?");
       return Visit(op);
     }
 
@@ -895,13 +892,11 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
       // Build a GEP to refer to the subobject.
       Address valueAddr =
           CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0);
-      valueDest = AggValueSlot::forAddr(valueAddr,
-                                        valueDest.getQualifiers(),
-                                        valueDest.isExternallyDestructed(),
-                                        valueDest.requiresGCollection(),
-                                        valueDest.isPotentiallyAliased(),
-                                        AggValueSlot::DoesNotOverlap,
-                                        AggValueSlot::IsZeroed);
+      valueDest = AggValueSlot::forAddr(
+          valueAddr, valueDest.getQualifiers(),
+          valueDest.isExternallyDestructed(), valueDest.requiresGCollection(),
+          valueDest.isPotentiallyAliased(), AggValueSlot::DoesNotOverlap,
+          AggValueSlot::IsZeroed);
     }
 
     CGF.EmitAggExpr(E->getSubExpr(), valueDest);
@@ -911,7 +906,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
     // Otherwise, we're converting an atomic type to a non-atomic type.
     // Make an atomic temporary, emit into that, and then copy the value out.
     AggValueSlot atomicSlot =
-      CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
+        CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
     CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);
 
     Address valueAddr = Builder.CreateStructGEP(atomicSlot.getAddress(), 0);
@@ -919,7 +914,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
     return EmitFinalDestCopy(valueType, rvalue);
   }
   case CK_AddressSpaceConversion:
-     return Visit(E->getSubExpr());
+    return Visit(E->getSubExpr());
 
   case CK_LValueToRValue:
     // If we're loading from a volatile type, force the destination
@@ -1054,9 +1049,8 @@ void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
     return;
   }
 
-  withReturnValueSlot(E, [&](ReturnValueSlot Slot) {
-    return CGF.EmitCallExpr(E, Slot);
-  });
+  withReturnValueSlot(
+      E, [&](ReturnValueSlot Slot) { return CGF.EmitCallExpr(E, Slot); });
 }
 
 void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
@@ -1219,7 +1213,7 @@ void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
 }
 
 void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
-                                                  const BinaryOperator *E) {
+    const BinaryOperator *E) {
   LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
   EmitFinalDestCopy(E->getType(), LV);
 }
@@ -1252,37 +1246,36 @@ static bool isBlockVarRef(const Expr *E) {
     // FIXME: pointer arithmetic?
     return false;
 
-  // Check both sides of a conditional operator.
-  } else if (const AbstractConditionalOperator *op
-               = dyn_cast<AbstractConditionalOperator>(E)) {
-    return isBlockVarRef(op->getTrueExpr())
-        || isBlockVarRef(op->getFalseExpr());
+    // Check both sides of a conditional operator.
+  } else if (const AbstractConditionalOperator *op =
+                 dyn_cast<AbstractConditionalOperator>(E)) {
+    return isBlockVarRef(op->getTrueExpr()) ||
+           isBlockVarRef(op->getFalseExpr());
 
-  // OVEs are required to support BinaryConditionalOperators.
-  } else if (const OpaqueValueExpr *op
-               = dyn_cast<OpaqueValueExpr>(E)) {
+    // OVEs are required to support BinaryConditionalOperators.
+  } else if (const OpaqueValueExpr *op = dyn_cast<OpaqueValueExpr>(E)) {
    if (const Expr *src = op->getSourceExpr())
      return isBlockVarRef(src);
 
-  // Casts are necessary to get things like (*(int*)&var) = foo().
-  // We don't really care about the kind of cast here, except
-  // we don't want to look through l2r casts, because it's okay
-  // to get the *value* in a __block variable.
+    // Casts are necessary to get things like (*(int*)&var) = foo().
+    // We don't really care about the kind of cast here, except
+    // we don't want to look through l2r casts, because it's okay
+    // to get the *value* in a __block variable.
   } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
     if (cast->getCastKind() == CK_LValueToRValue)
       return false;
     return isBlockVarRef(cast->getSubExpr());
 
-  // Handle unary operators. Again, just aggressively look through
-  // it, ignoring the operation.
+    // Handle unary operators. Again, just aggressively look through
+    // it, ignoring the operation.
  } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
     return isBlockVarRef(uop->getSubExpr());
 
-  // Look into the base of a field access.
+    // Look into the base of a field access.
  } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
     return isBlockVarRef(mem->getBase());
 
-  // Look into the base of a subscript.
+    // Look into the base of a subscript.
  } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
     return isBlockVarRef(sub->getBase());
   }
@@ -1295,8 +1288,8 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
   // For an assignment to work, the value on the right has
   // to be compatible with the value on the left.
   assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
-                                                 E->getRHS()->getType())
-         && "Invalid assignment");
+                                                 E->getRHS()->getType()) &&
+         "Invalid assignment");
 
   // If the LHS might be a __block variable, and the RHS can
   // potentially cause a block copy, we need to evaluate the RHS first
@@ -1344,8 +1337,7 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
       LHS, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
       AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
   // A non-volatile aggregate destination might have volatile member.
-  if (!LHSSlot.isVolatile() &&
-      CGF.hasVolatileMember(E->getLHS()->getType()))
+  if (!LHSSlot.isVolatile() && CGF.hasVolatileMember(E->getLHS()->getType()))
     LHSSlot.setVolatile(true);
 
   CGF.EmitAggExpr(E->getRHS(), LHSSlot);
@@ -1359,8 +1351,8 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
                   E->getType());
 }
 
-void AggExprEmitter::
-VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
+void AggExprEmitter::VisitAbstractConditionalOperator(
+    const AbstractConditionalOperator *E) {
   llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
   llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
   llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
@@ -1445,8 +1437,7 @@ void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
   CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress());
 }
 
-void
-AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
+void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
   AggValueSlot Slot = EnsureSlot(E->getType());
   CGF.EmitCXXConstructExpr(E, Slot);
 }
@@ -1454,13 +1445,12 @@ AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
 void AggExprEmitter::VisitCXXInheritedCtorInitExpr(
     const CXXInheritedCtorInitExpr *E) {
   AggValueSlot Slot = EnsureSlot(E->getType());
-  CGF.EmitInheritedCXXConstructorCall(
-      E->getConstructor(), E->constructsVBase(), Slot.getAddress(),
-      E->inheritedFromVBase(), E);
+  CGF.EmitInheritedCXXConstructorCall(E->getConstructor(), E->constructsVBase(),
+                                      Slot.getAddress(),
+                                      E->inheritedFromVBase(), E);
 }
 
-void
-AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
+void AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
   AggValueSlot Slot = EnsureSlot(E->getType());
   LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType());
 
@@ -1644,9 +1634,7 @@ static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
   return false;
 }
 
-
-void
-AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
+void AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
   QualType type = LV.getType();
   // FIXME: Ignore result?
   // FIXME: Are initializers affected by volatile?
@@ -1789,10 +1777,8 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
           Dest.getAddress(), CXXRD, BaseRD, /*isBaseVirtual*/ false);
       AggValueSlot AggSlot = AggValueSlot::forAddr(
-          V, Qualifiers(),
-          AggValueSlot::IsDestructed,
-          AggValueSlot::DoesNotNeedGCBarriers,
-          AggValueSlot::IsNotAliased,
+          V, Qualifiers(), AggValueSlot::IsDestructed,
+          AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
           CGF.getOverlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
 
       CGF.EmitAggExpr(InitExprs[curInitIndex++], AggSlot);
@@ -1888,8 +1874,8 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
     // Push a destructor if necessary.
     // FIXME: if we have an array of structures, all explicitly
     // initialized, we can end up pushing a linear number of cleanups.
-    if (QualType::DestructionKind dtorKind
-          = field->getType().isDestructedType()) {
+    if (QualType::DestructionKind dtorKind =
+            field->getType().isDestructedType()) {
       assert(LV.isSimple());
       if (dtorKind) {
         CGF.pushDestroyAndDeferDeactivation(NormalAndEHCleanup, LV.getAddress(),
@@ -2044,7 +2030,8 @@ void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
     CGF.DeactivateCleanupBlock(cleanup, index);
 }
 
-void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
+void AggExprEmitter::VisitDesignatedInitUpdateExpr(
+    DesignatedInitUpdateExpr *E) {
   AggValueSlot Dest = EnsureSlot(E->getType());
   LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
 
@@ -2065,7 +2052,8 @@ static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
   E = E->IgnoreParenNoopCasts(CGF.getContext());
 
   // 0 and 0.0 won't require any non-zero stores!
-  if (isSimpleZero(E, CGF)) return CharUnits::Zero();
+  if (isSimpleZero(E, CGF))
+    return CharUnits::Zero();
 
   // If this is an initlist expr, sum up the size of sizes of the (present)
   // elements. If this is something weird, assume the whole thing is non-zero.
@@ -2146,7 +2134,7 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
   // Check to see if over 3/4 of the initializer are known to be zero. If so,
   // we prefer to emit memset + individual stores for the rest.
   CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
-  if (NumNonZeroBytes*4 > Size)
+  if (NumNonZeroBytes * 4 > Size)
     return;
 
   // Okay, it seems like a good idea to use an initial memset, emit the call.
@@ -2159,9 +2147,6 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
   Slot.setZeroed();
 }
 
-
-
-
 /// EmitAggExpr - Emit the computation of the specified expression of aggregate
 /// type. The result is computed into DestPtr. Note that if DestPtr is null,
 /// the value of the aggregate expression is not needed. If VolatileDest is
@@ -2175,7 +2160,7 @@ void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
   // Optimize the slot if possible.
   CheckAggExprForMemSetUse(Slot, E, *this);
 
-  AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E));
+  AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr *>(E));
 }
 
 LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp
index 074c124dbf01b..78c10ec757bca 100644
--- a/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/clang/lib/CodeGen/CGExprCXX.cpp
@@ -30,7 +30,7 @@ struct MemberCallInfo {
   // Number of prefix arguments for the call. Ignores the `this` pointer.
   unsigned PrefixSize;
 };
-}
+} // namespace
 
 static MemberCallInfo
 commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, GlobalDecl GD,
@@ -125,8 +125,8 @@ RValue CodeGenFunction::EmitCXXDestructorCall(
                                       CE ? CE->getExprLoc() : SourceLocation{});
 }
 
-RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
-    const CXXPseudoDestructorExpr *E) {
+RValue
+CodeGenFunction::EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
   QualType DestroyedType = E->getDestroyedType();
   if (DestroyedType.hasStrongOrWeakObjCLifetime()) {
     // Automatic Reference Counting:
@@ -155,9 +155,9 @@ RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
       break;
 
     case Qualifiers::OCL_Strong:
-      EmitARCRelease(Builder.CreateLoad(BaseValue,
-                                        DestroyedType.isVolatileQualified()),
-                     ARCPreciseLifetime);
+      EmitARCRelease(
+          Builder.CreateLoad(BaseValue, DestroyedType.isVolatileQualified()),
+          ARCPreciseLifetime);
       break;
 
     case Qualifiers::OCL_Weak:
@@ -272,7 +272,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
       RtlArgs = &RtlArgStorage;
       EmitCallArgs(*RtlArgs, MD->getType()->castAs<FunctionProtoType>(),
                    drop_begin(CE->arguments(), 1), CE->getDirectCallee(),
-                   /*ParamsToSkip*/0, EvaluationOrder::ForceRightToLeft);
+                   /*ParamsToSkip*/ 0, EvaluationOrder::ForceRightToLeft);
     }
   }
 }
@@ -316,9 +316,8 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
       // It's important that we use the result of EmitLValue here rather than
      // emitting call arguments, in order to preserve TBAA information from
      // the RHS.
-      LValue RHS = isa<CXXOperatorCallExpr>(CE)
-                       ? TrivialAssignmentRHS
-                       : EmitLValue(*CE->arg_begin());
+      LValue RHS = isa<CXXOperatorCallExpr>(CE) ? TrivialAssignmentRHS
+                                                : EmitLValue(*CE->arg_begin());
       EmitAggregateAssign(This, RHS, CE->getType());
       return RValue::get(This.getPointer(*this));
     }
@@ -469,9 +468,8 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
 
   // Ask the ABI to load the callee. Note that This is modified.
   llvm::Value *ThisPtrForCall = nullptr;
-  CGCallee Callee =
-    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This,
-                                                    ThisPtrForCall, MemFnPtr, MPT);
+  CGCallee Callee = CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(
+      *this, BO, This, ThisPtrForCall, MemFnPtr, MPT);
 
   CallArgList Args;
 
@@ -584,9 +582,9 @@ static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
                                StoreSizeVal);
     }
 
-  // Otherwise, just memset the whole thing to zero. This is legal
-  // because in LLVM, all default initializers (other than the ones we just
-  // handled above) are guaranteed to have a bit pattern of all zeros.
+    // Otherwise, just memset the whole thing to zero. This is legal
+    // because in LLVM, all default initializers (other than the ones we just
+    // handled above) are guaranteed to have a bit pattern of all zeros.
   } else {
     for (std::pair<CharUnits, CharUnits> Store : Stores) {
       CharUnits StoreOffset = Store.first;
@@ -599,9 +597,8 @@ static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
   }
 }
 
-void
-CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
-                                      AggValueSlot Dest) {
+void CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
+                                           AggValueSlot Dest) {
   assert(!Dest.isIgnored() && "Must have a destination!");
 
   const CXXConstructorDecl *CD = E->getConstructor();
@@ -642,8 +639,7 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
     return;
   }
 
-  if (const ArrayType *arrayType
-        = getContext().getAsArrayType(E->getType())) {
+  if (const ArrayType *arrayType = getContext().getAsArrayType(E->getType())) {
     EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E,
                                Dest.isSanitizerChecked());
   } else {
@@ -668,10 +664,10 @@ void CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
 
     case CXXConstructionKind::NonVirtualBase:
       Type = Ctor_Base;
-     }
+    }
 
-     // Call the constructor.
-     EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E);
+    // Call the constructor.
+    EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E);
   }
 }
 
@@ -681,7 +677,7 @@ void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest, Address Src,
     Exp = E->getSubExpr();
   assert(isa<CXXConstructExpr>(Exp) &&
          "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
-  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
+  const CXXConstructExpr *E = cast<CXXConstructExpr>(Exp);
   const CXXConstructorDecl *CD = E->getConstructor();
   RunCleanupsScope Scope(*this);
 
@@ -692,8 +688,8 @@ void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest, Address Src,
   if (E->requiresZeroInitialization())
     EmitNullInitialization(Dest, E->getType());
 
-  assert(!getContext().getAsConstantArrayType(E->getType())
-         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
+  assert(!getContext().getAsConstantArrayType(E->getType()) &&
+         "EmitSynthesizedCXXCopyCtor - Copied-in Array");
 
   EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E);
 }
@@ -719,8 +715,8 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
 
   if (!e->isArray()) {
     CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
-    sizeWithoutCookie
-      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
+    sizeWithoutCookie =
+        llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
     return sizeWithoutCookie;
   }
 
@@ -746,16 +742,16 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
   // size_t. That's just a gloss, though, and it's wrong in one
   // important way: if the count is negative, it's an error even if
   // the cookie size would bring the total size >= 0.
-  bool isSigned
-    = (*e->getArraySize())->getType()->isSignedIntegerOrEnumerationType();
-  llvm::IntegerType *numElementsType
-    = cast<llvm::IntegerType>(numElements->getType());
+  bool isSigned =
+      (*e->getArraySize())->getType()->isSignedIntegerOrEnumerationType();
+  llvm::IntegerType *numElementsType =
+      cast<llvm::IntegerType>(numElements->getType());
   unsigned numElementsWidth = numElementsType->getBitWidth();
 
   // Compute the constant factor.
   llvm::APInt arraySizeMultiplier(sizeWidth, 1);
-  while (const ConstantArrayType *CAT
-           = CGF.getContext().getAsConstantArrayType(type)) {
+  while (const ConstantArrayType *CAT =
+             CGF.getContext().getAsConstantArrayType(type)) {
     type = CAT->getElementType();
     arraySizeMultiplier *= CAT->getSize();
   }
@@ -770,7 +766,7 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
   // If someone is doing 'new int[42]' there is no need to do a dynamic check.
   // Don't bloat the -O0 code.
   if (llvm::ConstantInt *numElementsC =
-        dyn_cast<llvm::ConstantInt>(numElements)) {
+          dyn_cast<llvm::ConstantInt>(numElements)) {
     const llvm::APInt &count = numElementsC->getValue();
 
     bool hasAnyOverflow = false;
@@ -797,13 +793,13 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
     // Scale numElements by that. This might overflow, but we don't
     // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
-    numElements = llvm::ConstantInt::get(CGF.SizeTy,
-                                         adjustedCount * arraySizeMultiplier);
+    numElements =
+        llvm::ConstantInt::get(CGF.SizeTy, adjustedCount * arraySizeMultiplier);
 
     // Compute the size before cookie, and track whether it overflowed.
     bool overflow;
-    llvm::APInt allocationSize
-      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
+    llvm::APInt allocationSize =
+        adjustedCount.umul_ov(typeSizeMultiplier, overflow);
     hasAnyOverflow |= overflow;
 
     // Add in the cookie, and check whether it's overflowed.
@@ -823,7 +819,7 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
       size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
     }
 
-  // Otherwise, we might need to use the overflow intrinsics.
+    // Otherwise, we might need to use the overflow intrinsics.
   } else {
     // There are up to five conditions we need to test for:
     // 1) if isSigned, we need to check whether numElements is negative;
@@ -847,13 +843,13 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
       llvm::APInt threshold =
          llvm::APInt::getOneBitSet(numElementsWidth, sizeWidth);
 
-      llvm::Value *thresholdV
-        = llvm::ConstantInt::get(numElementsType, threshold);
+      llvm::Value *thresholdV =
+          llvm::ConstantInt::get(numElementsType, threshold);
 
       hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
       numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);
 
-    // Otherwise, if we're signed, we want to sext up to size_t.
+      // Otherwise, if we're signed, we want to sext up to size_t.
     } else if (isSigned) {
       if (numElementsWidth < sizeWidth)
         numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);
@@ -864,10 +860,10 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
       // unsigned overflow. Otherwise, we have to do it here. But at least
      // in this case, we can subsume the >= minElements check.
       if (typeSizeMultiplier == 1)
-        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
-                              llvm::ConstantInt::get(CGF.SizeTy, minElements));
+        hasOverflow = CGF.Builder.CreateICmpSLT(
+            numElements, llvm::ConstantInt::get(CGF.SizeTy, minElements));
 
-    // Otherwise, zext up to size_t if necessary.
+      // Otherwise, zext up to size_t if necessary.
     } else if (numElementsWidth < sizeWidth) {
       numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
     }
@@ -877,15 +873,16 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
     if (minElements) {
       // Don't allow allocation of fewer elements than we have initializers.
       if (!hasOverflow) {
-        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
-                              llvm::ConstantInt::get(CGF.SizeTy, minElements));
+        hasOverflow = CGF.Builder.CreateICmpULT(
+            numElements, llvm::ConstantInt::get(CGF.SizeTy, minElements));
       } else if (numElementsWidth > sizeWidth) {
         // The other existing overflow subsumes this check.
         // We do an unsigned comparison, since any signed value < -1 is
         // taken care of either above or below.
-        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
-                          CGF.Builder.CreateICmpULT(numElements,
-                              llvm::ConstantInt::get(CGF.SizeTy, minElements)));
+        hasOverflow = CGF.Builder.CreateOr(
+            hasOverflow,
+            CGF.Builder.CreateICmpULT(
+                numElements, llvm::ConstantInt::get(CGF.SizeTy, minElements)));
       }
     }
 
@@ -899,11 +896,11 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
     // can be ignored because the result shouldn't be used if
     // allocation fails.
     if (typeSizeMultiplier != 1) {
-      llvm::Function *umul_with_overflow
-        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);
+      llvm::Function *umul_with_overflow =
+          CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);
 
       llvm::Value *tsmV =
-        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
+          llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
       llvm::Value *result =
           CGF.Builder.CreateCall(umul_with_overflow, {size, tsmV});
 
@@ -923,10 +920,10 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
         assert(arraySizeMultiplier == typeSizeMultiplier);
         numElements = size;
 
-      // Otherwise we need a separate multiply.
+        // Otherwise we need a separate multiply.
       } else {
         llvm::Value *asmV =
-          llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
+            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
         numElements = CGF.Builder.CreateMul(numElements, asmV);
       }
     }
@@ -939,8 +936,8 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
     if (cookieSize != 0) {
       sizeWithoutCookie = size;
 
-      llvm::Function *uadd_with_overflow
-        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);
+      llvm::Function *uadd_with_overflow =
+          CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);
 
       llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
       llvm::Value *result =
@@ -959,9 +956,8 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
     // overwrite 'size' with an all-ones value, which should cause
     // operator new to throw.
     if (hasOverflow)
-      size = CGF.Builder.CreateSelect(hasOverflow,
-                                      llvm::Constant::getAllOnesValue(CGF.SizeTy),
-                                      size);
+      size = CGF.Builder.CreateSelect(
+          hasOverflow, llvm::Constant::getAllOnesValue(CGF.SizeTy), size);
   }
 
   if (cookieSize == 0)
@@ -978,21 +974,19 @@ static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
   // FIXME: Refactor with EmitExprAsInit.
   switch (CGF.getEvaluationKind(AllocType)) {
   case TEK_Scalar:
-    CGF.EmitScalarInit(Init, nullptr,
-                       CGF.MakeAddrLValue(NewPtr, AllocType), false);
+    CGF.EmitScalarInit(Init, nullptr, CGF.MakeAddrLValue(NewPtr, AllocType),
+                       false);
     return;
   case TEK_Complex:
     CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType),
                                   /*isInit*/ true);
     return;
   case TEK_Aggregate: {
-    AggValueSlot Slot
-      = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
-                              AggValueSlot::IsDestructed,
-                              AggValueSlot::DoesNotNeedGCBarriers,
-                              AggValueSlot::IsNotAliased,
-                              MayOverlap, AggValueSlot::IsNotZeroed,
-                              AggValueSlot::IsSanitizerChecked);
+    AggValueSlot Slot = AggValueSlot::forAddr(
+        NewPtr, AllocType.getQualifiers(), AggValueSlot::IsDestructed,
+        AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
+        MayOverlap, AggValueSlot::IsNotZeroed,
+        AggValueSlot::IsSanitizerChecked);
     CGF.EmitAggExpr(Init, Slot);
     return;
   }
@@ -1021,7 +1015,7 @@ void CodeGenFunction::EmitNewArrayInitializer(
 
   CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType);
   CharUnits ElementAlign =
-    BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);
+      BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);
 
   // Attempt to perform zero-initialization using memset.
   auto TryMemsetInitialization = [&]() -> bool {
@@ -1071,22 +1065,19 @@ void CodeGenFunction::EmitNewArrayInitializer(
     // Initialize the initial portion of length equal to that of the string
     // literal. The allocation must be for at least this much; we emitted a
     // check for that earlier.
-    AggValueSlot Slot =
-        AggValueSlot::forAddr(CurPtr, ElementType.getQualifiers(),
-                              AggValueSlot::IsDestructed,
-                              AggValueSlot::DoesNotNeedGCBarriers,
-                              AggValueSlot::IsNotAliased,
-                              AggValueSlot::DoesNotOverlap,
-                              AggValueSlot::IsNotZeroed,
-                              AggValueSlot::IsSanitizerChecked);
+    AggValueSlot Slot = AggValueSlot::forAddr(
+        CurPtr, ElementType.getQualifiers(), AggValueSlot::IsDestructed,
+        AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
+        AggValueSlot::DoesNotOverlap, AggValueSlot::IsNotZeroed,
+        AggValueSlot::IsSanitizerChecked);
     EmitAggExpr(ILE ? ILE->getInit(0) : Init, Slot);
 
     // Move past these elements.
     InitListElements =
         cast<ConstantArrayType>(Init->getType()->getAsArrayTypeUnsafe())
            ->getZExtSize();
-    CurPtr = Builder.CreateConstInBoundsGEP(
-        CurPtr, InitListElements, "string.init.end");
+    CurPtr = Builder.CreateConstInBoundsGEP(CurPtr, InitListElements,
+                                            "string.init.end");
 
     // Zero out the rest, if any remain.
     llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
@@ -1209,7 +1200,7 @@ void CodeGenFunction::EmitNewArrayInitializer(
           NumElements,
           llvm::ConstantInt::get(NumElements->getType(), InitListElements));
       EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE,
-                                 /*NewPointerIsChecked*/true,
+                                 /*NewPointerIsChecked*/ true,
                                  CCE->requiresZeroInitialization());
       return;
     }
@@ -1345,10 +1336,9 @@ static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
   llvm::CallBase *CallOrInvoke;
   llvm::Constant *CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl);
   CGCallee Callee = CGCallee::forDirect(CalleePtr, GlobalDecl(CalleeDecl));
-  RValue RV =
-      CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
-                       Args, CalleeType, /*ChainCall=*/false),
-                   Callee, ReturnValueSlot(), Args, &CallOrInvoke);
+  RValue RV = CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
+                               Args, CalleeType, /*ChainCall=*/false),
+                           Callee, ReturnValueSlot(), Args, &CallOrInvoke);
 
   /// C++1y [expr.new]p10:
   ///   [In a new-expression,] an implementation is allowed to omit a call
@@ -1356,8 +1346,8 @@ static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
   ///
   /// We model such elidable calls with the 'builtin' attribute.
   llvm::Function *Fn = dyn_cast<llvm::Function>(CalleePtr);
-  if (CalleeDecl->isReplaceableGlobalAllocationFunction() &&
-      Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
+  if (CalleeDecl->isReplaceableGlobalAllocationFunction() && Fn &&
+      Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
     CallOrInvoke->addFnAttr(llvm::Attribute::Builtin);
   }
 
@@ -1371,8 +1361,8 @@ RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
   EmitCallArgs(Args, Type, TheCall->arguments());
 
   // Find the allocation or deallocation function that we're calling.
   ASTContext &Ctx = getContext();
-  DeclarationName Name = Ctx.DeclarationNames
-      .getCXXOperatorName(IsDelete ? OO_Delete : OO_New);
+  DeclarationName Name =
+      Ctx.DeclarationNames.getCXXOperatorName(IsDelete ? OO_Delete : OO_New);
 
   for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
     if (auto *FD = dyn_cast<FunctionDecl>(Decl))
@@ -1390,113 +1380,111 @@ RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
 }
 
 namespace {
-  /// A cleanup to call the given 'operator delete' function upon abnormal
-  /// exit from a new expression. Templated on a traits type that deals with
-  /// ensuring that the arguments dominate the cleanup if necessary.
-  template<typename Traits>
-  class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
-    /// Type used to hold llvm::Value*s.
-    typedef typename Traits::ValueTy ValueTy;
-    /// Type used to hold RValues.
-    typedef typename Traits::RValueTy RValueTy;
-    struct PlacementArg {
-      RValueTy ArgValue;
-      QualType ArgType;
-    };
-
-    unsigned NumPlacementArgs : 30;
-    LLVM_PREFERRED_TYPE(AlignedAllocationMode)
-    unsigned PassAlignmentToPlacementDelete : 1;
-    const FunctionDecl *OperatorDelete;
-    RValueTy TypeIdentity;
-    ValueTy Ptr;
-    ValueTy AllocSize;
-    CharUnits AllocAlign;
-
-    PlacementArg *getPlacementArgs() {
-      return reinterpret_cast<PlacementArg *>(this + 1);
-    }
+/// A cleanup to call the given 'operator delete' function upon abnormal
+/// exit from a new expression. Templated on a traits type that deals with
+/// ensuring that the arguments dominate the cleanup if necessary.
+template <typename Traits>
+class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
+  /// Type used to hold llvm::Value*s.
+  typedef typename Traits::ValueTy ValueTy;
+  /// Type used to hold RValues.
+  typedef typename Traits::RValueTy RValueTy;
+  struct PlacementArg {
+    RValueTy ArgValue;
+    QualType ArgType;
+  };
 
-  public:
-    static size_t getExtraSize(size_t NumPlacementArgs) {
-      return NumPlacementArgs * sizeof(PlacementArg);
-    }
+  unsigned NumPlacementArgs : 30;
+  LLVM_PREFERRED_TYPE(AlignedAllocationMode)
+  unsigned PassAlignmentToPlacementDelete : 1;
+  const FunctionDecl *OperatorDelete;
+  RValueTy TypeIdentity;
+  ValueTy Ptr;
+  ValueTy AllocSize;
+  CharUnits AllocAlign;
+
+  PlacementArg *getPlacementArgs() {
+    return reinterpret_cast<PlacementArg *>(this + 1);
+  }
 
-    CallDeleteDuringNew(size_t NumPlacementArgs,
-                        const FunctionDecl *OperatorDelete,
-                        RValueTy TypeIdentity, ValueTy Ptr, ValueTy AllocSize,
-                        const ImplicitAllocationParameters &IAP,
-                        CharUnits AllocAlign)
-      : NumPlacementArgs(NumPlacementArgs),
-        PassAlignmentToPlacementDelete(
-            isAlignedAllocation(IAP.PassAlignment)),
-        OperatorDelete(OperatorDelete), TypeIdentity(TypeIdentity), Ptr(Ptr),
-        AllocSize(AllocSize), AllocAlign(AllocAlign) {}
-
-    void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
-      assert(I < NumPlacementArgs && "index out of range");
-      getPlacementArgs()[I] = {Arg, Type};
-    }
+public:
+  static size_t getExtraSize(size_t NumPlacementArgs) {
+    return NumPlacementArgs * sizeof(PlacementArg);
+  }
 
-    void Emit(CodeGenFunction &CGF, Flags flags) override {
-      const auto *FPT = OperatorDelete->getType()->castAs<FunctionProtoType>();
-      CallArgList DeleteArgs;
-      unsigned FirstNonTypeArg = 0;
-      TypeAwareAllocationMode TypeAwareDeallocation =
-          TypeAwareAllocationMode::No;
-      if (OperatorDelete->isTypeAwareOperatorNewOrDelete()) {
-        TypeAwareDeallocation = TypeAwareAllocationMode::Yes;
-        QualType SpecializedTypeIdentity = FPT->getParamType(0);
-        ++FirstNonTypeArg;
-        DeleteArgs.add(Traits::get(CGF, TypeIdentity), SpecializedTypeIdentity);
-      }
-      // The first argument after type-identity parameter (if any) is always
-      // a void* (or C* for a destroying operator delete for class type C).
-      DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(FirstNonTypeArg));
-
-      // Figure out what other parameters we should be implicitly passing.
-      UsualDeleteParams Params;
-      if (NumPlacementArgs) {
-        // A placement deallocation function is implicitly passed an alignment
-        // if the placement allocation function was, but is never passed a size.
-        Params.Alignment =
-            alignedAllocationModeFromBool(PassAlignmentToPlacementDelete);
-        Params.TypeAwareDelete = TypeAwareDeallocation;
-        Params.Size = isTypeAwareAllocation(Params.TypeAwareDelete);
-      } else {
-        // For a non-placement new-expression, 'operator delete' can take a
-        // size and/or an alignment if it has the right parameters.
-        Params = OperatorDelete->getUsualDeleteParams();
-      }
+  CallDeleteDuringNew(size_t NumPlacementArgs,
+                      const FunctionDecl *OperatorDelete, RValueTy TypeIdentity,
+                      ValueTy Ptr, ValueTy AllocSize,
+                      const ImplicitAllocationParameters &IAP,
+                      CharUnits AllocAlign)
+      : NumPlacementArgs(NumPlacementArgs),
+        PassAlignmentToPlacementDelete(isAlignedAllocation(IAP.PassAlignment)),
+        OperatorDelete(OperatorDelete), TypeIdentity(TypeIdentity), Ptr(Ptr),
+        AllocSize(AllocSize), AllocAlign(AllocAlign) {}
+
+  void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
+    assert(I < NumPlacementArgs && "index out of range");
+    getPlacementArgs()[I] = {Arg, Type};
+  }
 
-      assert(!Params.DestroyingDelete &&
-             "should not call destroying delete in a new-expression");
-
-      // The second argument can be a std::size_t (for non-placement delete).
-      if (Params.Size)
-        DeleteArgs.add(Traits::get(CGF, AllocSize),
-                       CGF.getContext().getSizeType());
-
-      // The next (second or third) argument can be a std::align_val_t, which
-      // is an enum whose underlying type is std::size_t.
-      // FIXME: Use the right type as the parameter type. Note that in a call
-      // to operator delete(size_t, ...), we may not have it available.
-      if (isAlignedAllocation(Params.Alignment))
-        DeleteArgs.add(RValue::get(llvm::ConstantInt::get(
-                           CGF.SizeTy, AllocAlign.getQuantity())),
-                       CGF.getContext().getSizeType());
-
-      // Pass the rest of the arguments, which must match exactly.
-      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
-        auto Arg = getPlacementArgs()[I];
-        DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType);
-      }
+  void Emit(CodeGenFunction &CGF, Flags flags) override {
+    const auto *FPT = OperatorDelete->getType()->castAs<FunctionProtoType>();
+    CallArgList DeleteArgs;
+    unsigned FirstNonTypeArg = 0;
+    TypeAwareAllocationMode TypeAwareDeallocation = TypeAwareAllocationMode::No;
+    if (OperatorDelete->isTypeAwareOperatorNewOrDelete()) {
+      TypeAwareDeallocation = TypeAwareAllocationMode::Yes;
+      QualType SpecializedTypeIdentity = FPT->getParamType(0);
+      ++FirstNonTypeArg;
+      DeleteArgs.add(Traits::get(CGF, TypeIdentity), SpecializedTypeIdentity);
+    }
+    // The first argument after type-identity parameter (if any) is always
+    // a void* (or C* for a destroying operator delete for class type C).
+    DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(FirstNonTypeArg));
+
+    // Figure out what other parameters we should be implicitly passing.
+    UsualDeleteParams Params;
+    if (NumPlacementArgs) {
+      // A placement deallocation function is implicitly passed an alignment
+      // if the placement allocation function was, but is never passed a size.
+      Params.Alignment =
+          alignedAllocationModeFromBool(PassAlignmentToPlacementDelete);
+      Params.TypeAwareDelete = TypeAwareDeallocation;
+      Params.Size = isTypeAwareAllocation(Params.TypeAwareDelete);
+    } else {
+      // For a non-placement new-expression, 'operator delete' can take a
+      // size and/or an alignment if it has the right parameters.
+      Params = OperatorDelete->getUsualDeleteParams();
+    }
 
-      // Call 'operator delete'.
-      EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
-    }
-  };
-}
+    assert(!Params.DestroyingDelete &&
+           "should not call destroying delete in a new-expression");
+
+    // The second argument can be a std::size_t (for non-placement delete).
+    if (Params.Size)
+      DeleteArgs.add(Traits::get(CGF, AllocSize),
+                     CGF.getContext().getSizeType());
+
+    // The next (second or third) argument can be a std::align_val_t, which
+    // is an enum whose underlying type is std::size_t.
+    // FIXME: Use the right type as the parameter type. Note that in a call
+    // to operator delete(size_t, ...), we may not have it available.
+    if (isAlignedAllocation(Params.Alignment))
+      DeleteArgs.add(RValue::get(llvm::ConstantInt::get(
+                         CGF.SizeTy, AllocAlign.getQuantity())),
+                     CGF.getContext().getSizeType());
+
+    // Pass the rest of the arguments, which must match exactly.
+    for (unsigned I = 0; I != NumPlacementArgs; ++I) {
+      auto Arg = getPlacementArgs()[I];
+      DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType);
    }
+
+    // Call 'operator delete'.
+    EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
+  }
+};
+} // namespace
 
 /// Enter a cleanup to call 'operator delete' if the initializer in a
 /// new-expression throws.
@@ -1534,7 +1522,7 @@ static void EnterNewDeleteCleanup(CodeGenFunction &CGF, const CXXNewExpr *E,
   DominatingValue<RValue>::saved_type SavedNewPtr =
       DominatingValue<RValue>::save(CGF, RValue::get(NewPtr, CGF));
   DominatingValue<RValue>::saved_type SavedAllocSize =
-    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
+      DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
   DominatingValue<RValue>::saved_type SavedTypeIdentity =
       DominatingValue<RValue>::save(CGF, TypeIdentity);
   struct ConditionalCleanupTraits {
@@ -1588,9 +1576,8 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
 
   llvm::Value *numElements = nullptr;
   llvm::Value *allocSizeWithoutCookie = nullptr;
-  llvm::Value *allocSize =
-    EmitCXXNewAllocSize(*this, E, minElements, numElements,
-                        allocSizeWithoutCookie);
+  llvm::Value *allocSize = EmitCXXNewAllocSize(
+      *this, E, minElements, numElements, allocSizeWithoutCookie);
   CharUnits allocAlign = getContext().getTypeAlignInChars(allocType);
 
   // Emit the allocation call. If the allocator is a global placement
@@ -1621,7 +1608,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
 
   } else {
     const FunctionProtoType *allocatorType =
-      allocator->getType()->castAs<FunctionProtoType>();
+        allocator->getType()->castAs<FunctionProtoType>();
     ImplicitAllocationParameters IAP = E->implicitAllocationParameters();
     unsigned ParamsToSkip = 0;
     if (isTypeAwareAllocation(IAP.PassTypeIdentity)) {
@@ -1663,10 +1650,10 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
 
     // FIXME: Why do we not pass a CalleeDecl here?
     EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
-                 /*AC*/AbstractCallee(), /*ParamsToSkip*/ParamsToSkip);
+                 /*AC*/ AbstractCallee(), /*ParamsToSkip*/ ParamsToSkip);
 
     RValue RV =
-      EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
+        EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
 
     if (auto *newCall = dyn_cast<llvm::CallBase>(RV.getScalarVal())) {
       if (auto *CGDI = getDebugInfo()) {
@@ -1738,9 +1725,8 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
          CalculateCookiePadding(*this, E).isZero());
   if (allocSize != allocSizeWithoutCookie) {
     assert(E->isArray());
-    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
-                                                       numElements,
-                                                       E, allocType);
+    allocation = CGM.getCXXABI().InitializeArrayCookie(
+        *this, allocation, numElements, E, allocType);
   }
 
   llvm::Type *elementTy = ConvertTypeForMem(allocType);
@@ -1873,27 +1859,25 @@ void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
     (*TagAlloca)->eraseFromParent();
 }
 namespace {
-  /// Calls the given 'operator delete' on a single object.
-  struct CallObjectDelete final : EHScopeStack::Cleanup {
-    llvm::Value *Ptr;
-    const FunctionDecl *OperatorDelete;
-    QualType ElementType;
-
-    CallObjectDelete(llvm::Value *Ptr,
-                     const FunctionDecl *OperatorDelete,
-                     QualType ElementType)
+/// Calls the given 'operator delete' on a single object.
+struct CallObjectDelete final : EHScopeStack::Cleanup {
+  llvm::Value *Ptr;
+  const FunctionDecl *OperatorDelete;
+  QualType ElementType;
+
+  CallObjectDelete(llvm::Value *Ptr, const FunctionDecl *OperatorDelete,
+                   QualType ElementType)
       : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
 
-    void Emit(CodeGenFunction &CGF, Flags flags) override {
-      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
-    }
-  };
-}
+  void Emit(CodeGenFunction &CGF, Flags flags) override {
+    CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
+  }
+};
+} // namespace
 
-void
-CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
-                                             llvm::Value *CompletePtr,
-                                             QualType ElementType) {
+void CodeGenFunction::pushCallObjectDeleteCleanup(
+    const FunctionDecl *OperatorDelete, llvm::Value *CompletePtr,
+    QualType ElementType) {
   EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup, CompletePtr,
                                         OperatorDelete, ElementType);
 }
@@ -1941,10 +1925,9 @@ static bool EmitObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
     if (Dtor->isVirtual()) {
       bool UseVirtualCall = true;
       const Expr *Base = DE->getArgument();
-      if (auto *DevirtualizedDtor =
-              dyn_cast_or_null<const CXXDestructorDecl>(
-                  Dtor->getDevirtualizedMethod(
-                      Base, CGF.CGM.getLangOpts().AppleKext))) {
+      if (auto *DevirtualizedDtor = dyn_cast_or_null<const CXXDestructorDecl>(
+              Dtor->getDevirtualizedMethod(
+                  Base, CGF.CGM.getLangOpts().AppleKext))) {
         UseVirtualCall = false;
         const CXXRecordDecl *DevirtualizedClass =
             DevirtualizedDtor->getParent();
@@ -1979,8 +1962,7 @@ static bool EmitObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
   if (Dtor)
     CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                               /*ForVirtualBase=*/false,
-                              /*Delegating=*/false,
-                              Ptr, ElementType);
+                              /*Delegating=*/false, Ptr, ElementType);
   else if (auto Lifetime = ElementType.getObjCLifetime()) {
     switch (Lifetime) {
     case Qualifiers::OCL_None:
@@ -2010,34 +1992,30 @@ static bool EmitObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
 }
 
 namespace {
-  /// Calls the given 'operator delete' on an array of objects.
-  struct CallArrayDelete final : EHScopeStack::Cleanup {
-    llvm::Value *Ptr;
-    const FunctionDecl *OperatorDelete;
-    llvm::Value *NumElements;
-    QualType ElementType;
-    CharUnits CookieSize;
-
-    CallArrayDelete(llvm::Value *Ptr,
-                    const FunctionDecl *OperatorDelete,
-                    llvm::Value *NumElements,
-                    QualType ElementType,
-                    CharUnits CookieSize)
+/// Calls the given 'operator delete' on an array of objects.
+struct CallArrayDelete final : EHScopeStack::Cleanup {
+  llvm::Value *Ptr;
+  const FunctionDecl *OperatorDelete;
+  llvm::Value *NumElements;
+  QualType ElementType;
+  CharUnits CookieSize;
+
+  CallArrayDelete(llvm::Value *Ptr, const FunctionDecl *OperatorDelete,
+                  llvm::Value *NumElements, QualType ElementType,
+                  CharUnits CookieSize)
       : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
         ElementType(ElementType), CookieSize(CookieSize) {}
 
-    void Emit(CodeGenFunction &CGF, Flags flags) override {
-      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType, NumElements,
-                         CookieSize);
-    }
-  };
-}
+  void Emit(CodeGenFunction &CGF, Flags flags) override {
+    CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType, NumElements,
+                       CookieSize);
+  }
+};
+} // namespace
 
 /// Emit the code for deleting an array of objects.
-static void EmitArrayDelete(CodeGenFunction &CGF,
-                            const CXXDeleteExpr *E,
-                            Address deletedPtr,
-                            QualType elementType) {
+static void EmitArrayDelete(CodeGenFunction &CGF, const CXXDeleteExpr *E,
+                            Address deletedPtr, QualType elementType) {
   llvm::Value *numElements = nullptr;
   llvm::Value *allocatedPtr = nullptr;
   CharUnits cookieSize;
@@ -2048,10 +2026,9 @@ static void EmitArrayDelete(CodeGenFunction &CGF,
 
   // Make sure that we call delete even if one of the dtors throws.
   const FunctionDecl *operatorDelete = E->getOperatorDelete();
-  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
-                                           allocatedPtr, operatorDelete,
-                                           numElements, elementType,
-                                           cookieSize);
+  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup, allocatedPtr,
                                           operatorDelete, numElements,
                                           elementType, cookieSize);
 
  // Destroy the elements.
  if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
@@ -2059,11 +2036,11 @@ static void EmitArrayDelete(CodeGenFunction &CGF,
 
     CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
     CharUnits elementAlign =
-      deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);
+        deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);
 
     llvm::Value *arrayBegin = deletedPtr.emitRawPointer(CGF);
     llvm::Value *arrayEnd = CGF.Builder.CreateInBoundsGEP(
-      deletedPtr.getElementType(), arrayBegin, numElements, "delete.end");
+        deletedPtr.getElementType(), arrayBegin, numElements, "delete.end");
 
     // Note that it is legal to allocate a zero-length array, and we
     // can never fold the check away because the length should always

_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits
