Lunderberg commented on a change in pull request #10558:
URL: https://github.com/apache/tvm/pull/10558#discussion_r824962803



##########
File path: src/runtime/hexagon/hexagon/hexagon_device_api_v2.cc
##########
@@ -163,6 +170,63 @@ 
TVM_REGISTER_GLOBAL("device_api.hexagon.mem_copy").set_body([](TVMArgs args, TVM
   *rv = static_cast<int32_t>(0);
 });
 
+std::map<void*, HexagonBuffer*> vtcmallocs;
+
+TVM_REGISTER_GLOBAL("device_api.hexagon.AllocNd").set_body([](TVMArgs args, 
TVMRetValue* rv) {

Review comment:
       Instead of using a lambda for the body, can we move this to a separate 
function?  It tends to make debugging easier later on, and becomes a template 
for a generalized function we can propose adding to `c_runtime_api.cc`.

##########
File path: src/tir/transforms/lower_tvm_builtin.cc
##########
@@ -459,32 +459,40 @@ class BuiltinLower : public StmtExprMutator {
     return Call(op->dtype, builtin::tvm_call_trace_packed_lowered(), 
packed_args);
   }
 
-  Stmt MakeTextureAlloc(const LetStmtNode* let, const CallNode* call) {
+  Stmt MakeNdMemAllocWithScope(const LetStmtNode* let, const CallNode* call) {
     ICHECK(device_type_.defined()) << "Unknown device type in current IR";
     ICHECK(device_id_.defined()) << "Unknown device id in current IR";
     Stmt throw_last_error = Evaluate(Call(DataType::Int(32), 
builtin::tvm_throw_last_error(), {}));
 
     Stmt body = SeqStmt(
         {IfThenElse(Call(DataType::Bool(1), builtin::isnullptr(), {let->var}), 
throw_last_error),
          let->body});
+
     DataType dtype =
         
let->var->type_annotation.as<PointerTypeNode>()->element_type.as<PrimTypeNode>()->dtype;
 
     std::string fdevapi_prefix = "device_api.";
     fdevapi_prefix += 
runtime::DeviceName(device_type_.as<IntImmNode>()->value);
-    Call call_packed =
-        Call(let->var.dtype(), builtin::tvm_call_packed(),
-             {StringImm(fdevapi_prefix + ".AllocTexture"), 
cast(DataType::Int(32), device_type_),
-              cast(DataType::Int(32), device_id_), cast(DataType::UInt(64), 
call->args[0]),
-              cast(DataType::UInt(64), call->args[1]), 
IntImm(DataType::Int(32), dtype.code()),
-              IntImm(DataType::Int(32), dtype.bits())});
 
+    Array<PrimExpr> args = {
+        StringImm(fdevapi_prefix + ".AllocNd"),
+        device_type_,
+        device_id_,
+        IntImm(DataType::Int(32), dtype.code()),
+        IntImm(DataType::Int(32), dtype.bits()),
+    };
+
+    for (size_t i = 0; i < call->args.size(); ++i) {
+      args.push_back(call->args[i]);
+    }
+
+    Call call_packed = Call(let->var.dtype(), builtin::tvm_call_packed(), 
args);
     Stmt alloca = LetStmt(let->var, call_packed, body);
 
-    Call free_op =
-        Call(DataType::Int(32), builtin::tvm_call_packed(),
-             {StringImm(fdevapi_prefix + ".FreeTexture"), 
cast(DataType::Int(32), device_type_),
-              cast(DataType::Int(32), device_id_), let->var});
+    PrimExpr storage_scope = call->args[0];
+    Call free_op = Call(
+        DataType::Int(32), builtin::tvm_call_packed(),
+        {StringImm(fdevapi_prefix + ".FreeNd"), device_type_, device_id_, 
storage_scope, let->var});

Review comment:
       I like the reuse here for both texture and VTCM allocation, and it makes 
a good argument for eventually pulling this functionality up into the `DeviceAPI` interface.

##########
File path: src/runtime/hexagon/hexagon/hexagon_device_api_v2.cc
##########
@@ -163,6 +170,63 @@ 
TVM_REGISTER_GLOBAL("device_api.hexagon.mem_copy").set_body([](TVMArgs args, TVM
   *rv = static_cast<int32_t>(0);
 });
 
+std::map<void*, HexagonBuffer*> vtcmallocs;
+
+TVM_REGISTER_GLOBAL("device_api.hexagon.AllocNd").set_body([](TVMArgs args, 
TVMRetValue* rv) {
+  int32_t device_type = args[0];
+  int32_t device_id = args[1];
+  int32_t dtype_code_hint = args[2];
+  int32_t dtype_bits_hint = args[3];
+  std::string scope = args[4];
+  CHECK(scope.find("vtcm") != std::string::npos);
+  int64_t ndim = args[5];
+  // Forcing contiguous allocation, for now
+  // TODO(Straw): Enable discontiguous allocation after RFC 39 lands
+  CHECK_EQ(ndim, 1);
+  std::vector<int64_t> shape;
+  for (int i = 0; i < ndim; ++i) {
+    shape.push_back(args[6 + i]);
+  }
+
+  Device dev;
+  dev.device_type = static_cast<DLDeviceType>(device_type);
+  dev.device_id = device_id;
+
+  DLDataType type_hint;
+  type_hint.code = static_cast<decltype(type_hint.code)>(dtype_code_hint);
+  type_hint.bits = static_cast<decltype(type_hint.bits)>(dtype_bits_hint);
+  type_hint.lanes = 1;
+
+  HexagonDeviceAPIv2* hexapi = HexagonDeviceAPIv2::Global();
+  HexagonBuffer* hexbuf = reinterpret_cast<HexagonBuffer*>(
+      hexapi->AllocVtcmWorkspace(dev, ndim, shape.data(), type_hint, 
String(scope)));
+
+  // Assumes a single contiguous allocation
+  // TODO(Straw): Enable discontiguous allocation after RFC 39 lands
+  void* ptr = hexbuf->GetPointer()[0];
+  vtcmallocs[ptr] = hexbuf;
+  *rv = ptr;
+});
+
+TVM_REGISTER_GLOBAL("device_api.hexagon.FreeNd").set_body([](TVMArgs args, 
TVMRetValue* rv) {

Review comment:
       Same request, can we move the body into a free-standing function?  That 
also lets us use `.set_body_typed` to avoid needing to explicitly unpack the 
arguments.

##########
File path: src/runtime/hexagon/hexagon/hexagon_device_api_v2.cc
##########
@@ -119,6 +119,13 @@ void HexagonDeviceAPIv2::FreeWorkspace(Device dev, void* 
data) {
   workspace_allocations_.erase(it);
 }
 
+void* HexagonDeviceAPIv2::AllocVtcmWorkspace(Device dev, int ndim, const 
int64_t* shape,
+                                             DLDataType dtype, 
Optional<String> mem_scope) {
+  return AllocDataSpace(dev, ndim, shape, dtype, mem_scope);

Review comment:
       * I think we should add an assert here that `ndim` is either 1 or 2, the 
supported number of physical dimensions.  
   
    * Does this call `AllocDataSpace` as implemented by `DeviceAPI`, or as 
implemented by `HexagonDeviceAPIv2`?  I only see an override of 
`HexagonDeviceAPIv2::AllocDataSpace` for the overload without a `mem_scope` 
argument.




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to