================
@@ -726,6 +727,108 @@ static RValue tryEmitFPMathIntrinsic(CIRGenFunction &cgf,
const CallExpr *e,
return RValue::getIgnored();
}
+static mlir::Type
+decodeFixedType(ArrayRef<llvm::Intrinsic::IITDescriptor> &infos,
+ mlir::MLIRContext *context) {
+ using namespace llvm::Intrinsic;
+
+ IITDescriptor descriptor = infos.front();
+ infos = infos.slice(1);
+
+ switch (descriptor.Kind) {
+ case IITDescriptor::Void:
+ return cir::VoidType::get(context);
+ case IITDescriptor::Integer:
+ return cir::IntType::get(context, descriptor.Integer_Width,
+ /*isSigned=*/true);
+ case IITDescriptor::Float:
+ return cir::SingleType::get(context);
+ case IITDescriptor::Double:
+ return cir::DoubleType::get(context);
+ default:
+ llvm_unreachable("NYI");
+ }
+}
+
+/// Helper function to correct integer signedness for intrinsic arguments.
+/// IIT always returns signed integers, but the actual intrinsic may expect
+/// unsigned integers based on the AST FunctionDecl parameter types.
+static mlir::Type getIntrinsicArgumentTypeFromAST(mlir::Type iitType,
+ const CallExpr *E,
+ unsigned argIndex,
+ mlir::MLIRContext *context) {
+ // If it's not an integer type, return as-is
+ auto intTy = dyn_cast<cir::IntType>(iitType);
+ if (!intTy)
+ return iitType;
+
+ // Get the FunctionDecl from the CallExpr
+ const FunctionDecl *FD = nullptr;
+ if (const auto *DRE =
+ dyn_cast<DeclRefExpr>(E->getCallee()->IgnoreImpCasts())) {
+ FD = dyn_cast<FunctionDecl>(DRE->getDecl());
+ }
+
+ // If we have FunctionDecl and this argument exists, check its signedness
+ if (FD && argIndex < FD->getNumParams()) {
+ QualType paramType = FD->getParamDecl(argIndex)->getType();
+ if (paramType->isUnsignedIntegerType()) {
+ // Create unsigned version of the type
+ return cir::IntType::get(context, intTy.getWidth(), /*isSigned=*/false);
+ }
+ }
+
+ // Default: keep IIT type (signed)
+ return iitType;
+}
+
+static mlir::Value getCorrectedPtr(mlir::Value argValue, mlir::Type expectedTy,
+ CIRGenBuilderTy &builder) {
+ auto ptrType = mlir::dyn_cast<cir::PointerType>(argValue.getType());
+ assert(ptrType && "expected pointer type");
+
+ auto expectedPtrType = mlir::cast<cir::PointerType>(expectedTy);
+ assert(ptrType.getPointee() != expectedPtrType.getPointee() &&
+ "types should not match");
+
+ if (ptrType.getAddrSpace() != expectedPtrType.getAddrSpace()) {
+ auto newPtrType = cir::PointerType::get(ptrType.getPointee(),
+ expectedPtrType.getAddrSpace());
+ return builder.createAddrSpaceCast(argValue, newPtrType);
+ }
+
+ return argValue;
+}
----------------
Priyanshu3820 wrote:
We don't necessarily need the AST type; the `expectedTy` parameter already
contains the correct target type from the intrinsic signature. Even for
address space, only a few AMDGPU builtins would need this handling (e.g.
__builtin_amdgcn_queue_ptr, which returns a pointer in address space 4, if
I'm not mistaken). I don't see any builtin on any other architecture that
would need it, and there are no builtins with any other kind of type
mismatch that would fall through this generic path. But even granting that
no such builtin will reach this point, there is still an inconsistency: the
function first asserts that the pointee types must differ (the very reason
to call it) and handles address-space differences correctly, yet it returns
`argValue` unchanged when the address spaces match, which seems incorrect.
So I think this is less about preventing a crash and more about code
correctness.
https://github.com/llvm/llvm-project/pull/179098
_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits