manupa-arm commented on a change in pull request #9565:
URL: https://github.com/apache/tvm/pull/9565#discussion_r783276750



##########
File path: src/target/source/source_module.cc
##########
@@ -197,45 +202,161 @@ class CSourceCrtMetadataModuleNode : public 
runtime::ModuleNode {
           << "}\n";
   }
 
+  String GenerateDLTensorStructWrapper(String reference_arg) {
+    code_ << "DLTensor " << reference_arg << "_dlt = {\n";
+    code_ << ".data = &" << reference_arg << "\n";
+    code_ << "};\n";
+    code_ << "TVMValue " << reference_arg << "_tvmv = {\n";
+    code_ << ".v_handle = &" << reference_arg << "_dlt\n";
+    code_ << "};\n";
+    return reference_arg + "_tvmv";
+  }
+
+  void GenerateInternalWorkspaceBuffers() {
+    if (metadata_->pool_inputs.defined()) {
+      for (const auto& kv : metadata_->pool_inputs.value()) {
+        tir::usmp::AllocatedPoolInfo allocated_pool_info = kv.second;
+        if (allocated_pool_info->pool_info->is_internal) {
+          code_ << "__attribute__((section(\".bss.tvm\"), ";
+          code_ << "aligned(" << 16 << ")))\n";
+          code_ << "static uint8_t " << 
allocated_pool_info->pool_info->pool_name << "["
+                << allocated_pool_info->allocated_size->value << "];\n";
+        }
+      }
+    }
+  }
+
+  bool IsInternalWorkspaceBuffer(const tir::Var& pool_var) {
+    if (metadata_->pool_inputs.defined()) {
+      Map<tir::Var, tir::usmp::AllocatedPoolInfo> allocated_pool_infos =
+          metadata_->pool_inputs.value();
+      if (allocated_pool_infos.find(pool_var) != allocated_pool_infos.end()) {
+        tir::usmp::AllocatedPoolInfo allocate_pool_info = 
allocated_pool_infos[pool_var];
+        if (allocate_pool_info->pool_info->is_internal) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
   void GenerateEntrypointForUnpackedAPI(const std::string& entrypoint_name,
                                         const std::string& run_func) {
     code_ << "TVM_DLL int32_t " << run_func << "(";
-    unsigned int total_args = (metadata_->inputs.size() + 
metadata_->num_outputs);
-    for (unsigned int i = 0; i < total_args; ++i) {
-      code_ << "void* arg" << i;
-      if (i + 1 != total_args) {
-        code_ << ",";
+
+    {
+      std::stringstream call_args_ss;
+      for (const tir::Var& input_var : metadata_->inputs) {
+        if (input_var->type_annotation.defined()) {
+          codegen_c_.PrintType(input_var->type_annotation, call_args_ss);
+        } else {
+          codegen_c_.PrintType(input_var.dtype(), call_args_ss);
+        }
+        call_args_ss << " " << input_var->name_hint << ",";
+      }
+      for (unsigned int i = 0; i < metadata_->num_outputs; ++i) {
+        call_args_ss << "void* output" << i << ",";
+      }
+      for (const tir::Var& pool_var : metadata_->pools) {
+        if (pool_var->type_annotation.defined()) {
+          codegen_c_.PrintType(pool_var->type_annotation, call_args_ss);
+        } else {
+          codegen_c_.PrintType(pool_var.dtype(), call_args_ss);
+        }
+        call_args_ss << " " << pool_var->name_hint << ",";
       }
+      std::string call_args_str = call_args_ss.str();
+      call_args_str.pop_back();
+      code_ << call_args_str;
     }
+
     code_ << ");\n";
     code_ << "int32_t " << entrypoint_name;
     code_ << "(void* args, void* type_code, int num_args, void* out_value, 
void* "
              "out_type_code, void* resource_handle) {\n";
     code_ << "return " << run_func << "(";
-    for (unsigned int i = 0; i < metadata_->inputs.size(); ++i) {
-      code_ << "((DLTensor*)(((TVMValue*)args)[" << i << 
"].v_handle))[0].data,";
+
+    {
+      std::stringstream call_args_ss;
+      for (unsigned int i = 0; i < metadata_->inputs.size(); ++i) {
+        call_args_ss << "((DLTensor*)(((TVMValue*)args)[" << i << 
"].v_handle))[0].data,";

Review comment:
       This keeps it consistent with codegen_c, unless I missed a change to that file.




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to