lhutton1 commented on a change in pull request #5915:
URL: https://github.com/apache/incubator-tvm/pull/5915#discussion_r456313568
##########
File path: src/runtime/contrib/arm_compute_lib/acl_runtime.cc
##########

@@ -0,0 +1,310 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file src/runtime/contrib/arm_compute_lib/acl_runtime.cc
+ * \brief A simple JSON runtime for Arm Compute Library.
+ */
+
+#include <tvm/runtime/ndarray.h>
+#include <tvm/runtime/registry.h>
+
+#include "../../file_util.h"
+#include "../json/json_node.h"
+#include "../json/json_runtime.h"
+
+#ifdef TVM_GRAPH_RUNTIME_ARM_COMPUTE_LIB
+#include <arm_compute/core/Types.h>
+#include <arm_compute/runtime/NEON/functions/NEConvolutionLayer.h>
+#include <arm_compute/runtime/NEON/functions/NEPoolingLayer.h>
+#include <arm_compute/runtime/NEON/functions/NEReshapeLayer.h>
+
+#include "acl_allocator.h"
+#include "acl_utils.h"
+#endif
+
+namespace tvm {
+namespace runtime {
+namespace contrib {
+
+using namespace tvm::runtime::json;
+
+#ifdef TVM_GRAPH_RUNTIME_ARM_COMPUTE_LIB
+using namespace arm_compute_lib;
+#endif
+
+class ACLRuntime : public JSONRuntimeBase {
+ public:
+  /*!
+   * \brief The ACL runtime module. Deserialize the provided functions
+   * on creation and store in the layer cache.
+   *
+   * \param symbol_name The name of the function.
+   * \param graph_json Serialized JSON representation of a sub-graph.
+   * \param const_names The names of each constant in the sub-graph.
+   */
+  explicit ACLRuntime(const std::string& symbol_name, const std::string& graph_json,
+                      const Array<String>& const_names)
+      : JSONRuntimeBase(symbol_name, graph_json, const_names) {}
+
+  /*!
+   * \brief The type key of the module.
+   *
+   * \return Module type key.
+   */
+  const char* type_key() const override { return "arm_compute_lib"; }
+
+  /*!
+   * \brief Initialize runtime. Create ACL layer from JSON
+   * representation.
+   *
+   * \param consts The constant params from the compiled model.
+   */
+  void Init(const Array<NDArray>& consts) override {
+    CHECK_EQ(consts.size(), const_idx_.size())
+        << "The number of input constants must match the number required.";
+    SetupConstants(consts);
+    BuildEngine();
+  }
+
+  /*!
+   * \brief Get the JSON generated by codegen.
+   *
+   * \param format The format to return (only JSON for the time being).
+   * \return A string of JSON.
+   */
+  std::string GetSource(const std::string& format) override {
+    if (format == "json") {
+      return graph_json_;
+    }
+    LOG(FATAL) << "Format not supported by Arm Compute Library runtime.";
+    return "";
+  }
+
+#ifdef TVM_GRAPH_RUNTIME_ARM_COMPUTE_LIB
+  /*!
+   * \brief Unpack inputs and outputs and run inference on a given layer.
+   */
+  void Run() override {
+    for (size_t i = 0; i < input_nodes_.size(); ++i) {
+      auto nid = input_nodes_[i];
+      uint32_t eid = EntryID(nid, 0);
+      if (nodes_[nid].GetOpType() == "input") {
+        void* data = data_entry_[eid]->data;
+        CheckACLError(layer_.inputs[i].allocator()->import_memory(data));
+      }
+    }
+
+    for (size_t i = 0; i < outputs_.size(); ++i) {
+      uint32_t eid = EntryID(outputs_[i]);
+      void* data = data_entry_[eid]->data;
+      CheckACLError(layer_.outputs[i].allocator()->import_memory(data));
+    }
+
+    this->layer_.function->run();
+  }
+
+ private:
+  /*!
+   * \brief Build ACL layer from JSON representation and cache.
+   *
+   * \note For the time being only one layer or operator is supported
+   * per engine.
+   */
+  void BuildEngine() {
+    std::shared_ptr<arm_compute::MemoryManagerOnDemand> mm = MakeMemoryManager();
+    int num_pools = 0;
+
+    for (size_t i = 0; i < input_nodes_.size(); ++i) {
+      uint32_t nid = input_nodes_[i];
+      const auto& node = nodes_[nid];
+      if (node.GetOpType() == "input") {
+        layer_.inputs.push_back(MakeTensor(node));
+      } else if (node.GetOpType() == "const") {
+        uint32_t eid = EntryID(nid, 0);
+        void* data = data_entry_[eid]->data;
+        layer_.const_inputs.push_back(MakeTensor(node, data));
+      }
+    }
+
+    bool found_kernel_node = false;
+    for (size_t nid = 0; nid < nodes_.size(); ++nid) {
+      const auto& node = nodes_[nid];
+      if (found_kernel_node) {
+        LOG(FATAL)
+            << "Arm Compute Library runtime module only supports one kernel node per function.";
+      }
+      if (node.GetOpType() == "kernel") {
+        found_kernel_node = true;
+        auto op_name = node.GetOpName();
+        if ("nn.conv2d" == op_name || "arm_compute_lib.conv2d" == op_name) {

Review comment:
   Yes, they could all be `nn.conv2d`. When writing this I thought there might be a case where we want to distinguish between a composite operator and a single operator, although I don't think this will happen.
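   As a sketch of the alternative discussed above: if the composite/single distinction is never needed, the two spellings could be collapsed before dispatch. The helper below is illustrative only and not part of this PR; `CanonicalOpName` and the assumption that every composite name has the form `arm_compute_lib.<op>` for a Relay op `nn.<op>` are hypothetical.

```cpp
#include <string>

// Hypothetical helper (not in this PR): map a composite operator name such as
// "arm_compute_lib.conv2d" back to the plain Relay name "nn.conv2d", so a
// single dispatch branch can handle both spellings. Assumes composite names
// follow the pattern "arm_compute_lib.<op>" where the Relay op is "nn.<op>".
std::string CanonicalOpName(const std::string& op_name) {
  const std::string prefix = "arm_compute_lib.";
  if (op_name.rfind(prefix, 0) == 0) {  // starts_with, pre-C++20 idiom
    return "nn." + op_name.substr(prefix.size());
  }
  return op_name;
}
```

   With such a helper the kernel check would reduce to `if (CanonicalOpName(op_name) == "nn.conv2d")`, at the cost of losing exactly the composite-versus-single distinction mentioned above.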

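   For readers unfamiliar with the `import_memory` calls in `Run()` above: ACL tensors can wrap an externally owned buffer instead of allocating their own storage, which is what makes the runtime zero-copy on inputs and outputs. A minimal standalone sketch, assuming Arm Compute Library is installed; the shape and data type are arbitrary placeholders:

```cpp
#include <arm_compute/core/TensorInfo.h>
#include <arm_compute/core/TensorShape.h>
#include <arm_compute/runtime/Tensor.h>

#include <vector>

int main() {
  // Externally owned buffer, standing in for the data_entry_ pointer the
  // runtime receives from TVM.
  std::vector<float> buffer(3 * 224 * 224, 0.0f);

  // init() records shape/type metadata but allocates no backing storage.
  arm_compute::Tensor tensor;
  tensor.allocator()->init(arm_compute::TensorInfo(
      arm_compute::TensorShape(224U, 224U, 3U), 1, arm_compute::DataType::F32));

  // Wrap the external buffer: the tensor now reads and writes through
  // `buffer` directly, so no copy into ACL-managed memory occurs.
  tensor.allocator()->import_memory(buffer.data());

  return 0;
}
```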