junrushao commented on code in PR #13973:
URL: https://github.com/apache/tvm/pull/13973#discussion_r1109366677
##########
src/target/source/codegen_cuda.cc:
##########
@@ -926,6 +926,37 @@ void CodeGenCUDA::VisitExpr_(const CallNode* op,
std::ostream& os) {
} else if (op->op.same_as(builtin::ptx_wait_group())) {
std::string N = this->PrintExpr(op->args[0]);
this->stream << "__asm__ __volatile__(\"cp.async.wait_group " + N +
";\");\n\n";
+ } else if (op->op.same_as(builtin::ptx_ldg32())) {
+ /*
+ asm volatile (
+ "{.reg .pred p;\n"
+ " setp.ne.b32 p, %2, 0;\n"
+ // " @p ld.global.nc.f32 %0, [%1];}\n"t
+ " @p ld.global.nc.L2::128B.f32 %0, [%1];}\n"
+ : "=f"(reg)
+ : "l"(addr), "r"((int)guard)
+ );
+ */
+
+ // get local
+ std::string reg = this->PrintExpr(op->args[0]);
+ // get guard
+ std::string guard = this->PrintExpr(op->args[1]);
+ const BufferLoadNode* addr_buffer = op->args[2].as<BufferLoadNode>();
+ std::string global_addr = this->PrintExpr(addr_buffer->indices[0]);
+ std::string global_buffer = this->PrintExpr(addr_buffer->buffer->data);
+ std::string local_addr = this->PrintExpr(op->args[3]);
+ this->stream << "asm volatile (\n";
+ this->stream << "\"{.reg .pred p;\\n\"\n";
+ this->stream << "\" setp.ne.b32 p, %2, 0;\\n\"\n";
+ this->stream << "\" @!p mov.b32 %0, 0;\\n\"\n";
+ this->stream << "\" @p ld.global.nc.f32 %0, [%1];}\\n\"\n";
+ // stream << "\" @p ld.global.nc.L2::128B.f32 %0, [%1];}\\n\"\n" ;
+ stream << ": \"=f\"(" << reg << "[" << local_addr << "]"
+ << ")\n";
+ stream << ": \"l\"((void*)(" << global_buffer << "+" << global_addr <<
")), \"r\"((int)"
+ << guard << ")\n";
+ stream << ");\n";
Review Comment:
Perhaps it would be clearer to write it this way:
https://github.com/apache/tvm/pull/13966/files#diff-28ce493acf6a737cd561f3996bd897c8c14edc056f9125503344453dcf390d49R668-R690
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]