quic-sanirudh commented on code in PR #15953:
URL: https://github.com/apache/tvm/pull/15953#discussion_r1367878661
##########
src/target/llvm/codegen_llvm.cc:
##########
@@ -1320,7 +1320,13 @@ void CodeGenLLVM::EmitFloat16ConversionBuiltins(bool
use_float16_abi) {
llvm::Value* CodeGenLLVM::CreateIntrinsic(const CallNode* op) {
if (op->op.same_as(builtin_call_llvm_intrin_) ||
op->op.same_as(builtin_call_llvm_pure_intrin_)) {
ICHECK_GE(op->args.size(), 2U);
- llvm::Intrinsic::ID id =
static_cast<llvm::Intrinsic::ID>(Downcast<IntImm>(op->args[0])->value);
+ llvm::Intrinsic::ID id = 0;
+ if (op->args[0]->IsInstance<StringImmNode>()) {
+ id =
llvm::Function::lookupIntrinsicID(Downcast<StringImm>(op->args[0])->value.c_str());
+ } else if (op->args[0]->IsInstance<IntImmNode>()) {
+ id =
static_cast<llvm::Intrinsic::ID>(Downcast<IntImm>(op->args[0])->value);
+ }
+ assert(id != 0);
Review Comment:
Same here — please use `ICHECK` instead of `assert` (see the comment on `codegen_arm.cc` below).
##########
src/target/llvm/codegen_arm.cc:
##########
@@ -55,7 +55,13 @@ class CodeGenARM final : public CodeGenCPU {
llvm::Value* CodeGenARM::CreateIntrinsic(const CallNode* op) {
if (op->op.same_as(builtin_call_llvm_intrin_) ||
op->op.same_as(builtin_call_llvm_pure_intrin_)) {
- llvm::Intrinsic::ID id =
static_cast<llvm::Intrinsic::ID>(Downcast<IntImm>(op->args[0])->value);
+ llvm::Intrinsic::ID id = 0;
+ if (op->args[0]->IsInstance<StringImmNode>()) {
+ id =
llvm::Function::lookupIntrinsicID(Downcast<StringImm>(op->args[0])->value.c_str());
+ } else if (op->args[0]->IsInstance<IntImmNode>()) {
+ id =
static_cast<llvm::Intrinsic::ID>(Downcast<IntImm>(op->args[0])->value);
+ }
+ assert(id != 0);
Review Comment:
Could you use an `ICHECK` instead of `assert` here, so that the error shows
a stack trace? I don't think this can be user-facing, as the Python API also
seems to check this — so this is more of a sanity check.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]