manupa-arm commented on code in PR #11208:
URL: https://github.com/apache/tvm/pull/11208#discussion_r875009499
##########
src/relay/backend/aot/annotate_used_memory.cc:
##########
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file src/relay/backend/aot/annotate_used_memory.cc
+ * \brief Analyzes the memory pressure at the callsite of primitive functions.
+ */
+
+#include <tvm/ir/module.h>
+#include <tvm/relay/transform.h>
+
+#include "../../transforms/device_aware_visitors.h"
+#include "../manifest_lifetimes.h"
+
+namespace tvm {
+namespace relay {
+namespace backend {
+namespace aot {
+
+/*!
+ * \brief Annotates the memory usage of each primitive function by analysing the liveness
+ * of the input/output tensors at the function callsite and calculating the total amount of
+ * memory these tensors require.
+ */
+class AnnotateUsedMemoryMutator : public transform::DeviceAwareExprMutator {
+ public:
+  AnnotateUsedMemoryMutator(const IRModule& module, const transform::ControlFlowGraph& cfg,
+                            const transform::LivenessAnalysis& lva)
+      : DeviceAwareExprMutator(module), control_flow_graph_(cfg), liveness_(lva) {}
+
+  /*!
+   * \brief Get the memory required for a primitive Relay function by calculating the total
+   * bytes of the live tensors at the callsite of the function.
+   *
+   * \param live_tensors The tensors that are live when the function is called.
+   * \return int The total number of bytes a function requires.
+   */
+  int GetMemoryUsage(const transform::VarSet& live_tensors) {
+    Array<Type> types_stack = {};
+    int memory_usage = 0;
+
+    for (const Var& var : live_tensors) {
+      Type var_type = var->checked_type();
+      ICHECK(var_type.defined()) << "InferTypes pass should be run before AnnotateUsedMemory pass.";
+      types_stack.push_back(var_type);
+    }
+
+    while (!types_stack.empty()) {

Review Comment:
   Is there a reason to do a depth-first traversal of the types? (As opposed to expanding the types in a flat manner to get the bytes.)
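   For reference, whichever traversal is chosen, the quantity being computed reduces to a sum over the flattened tensor types of element count times dtype width. Below is a minimal Python sketch of that arithmetic; `flat_tensor_bytes` is a hypothetical helper written for this comment, not an API from the PR:

```python
import numpy as np

from tvm import relay


def flat_tensor_bytes(ty):
    """Expand (possibly nested) tuple types flatly and sum prod(shape) * dtype bytes."""
    worklist, total = [ty], 0
    while worklist:
        current = worklist.pop()
        if isinstance(current, relay.TupleType):
            worklist.extend(current.fields)  # expand tuple fields into the flat worklist
        else:
            elements = 1
            for dim in current.shape:
                elements *= int(dim)
            total += elements * np.dtype(current.dtype).itemsize
    return total


# e.g. test_simple below: the input and output are both (1, 2, 2, 4) int8 tensors,
# so the expected annotation is 2 * flat_tensor_bytes(...) = 2 * 16 = 32 bytes.
print(flat_tensor_bytes(relay.TensorType((1, 2, 2, 4), "int8")))  # 16
```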
##########
tests/python/relay/aot/test_used_memory_annotator.py:
##########
@@ -0,0 +1,194 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# pylint: disable=invalid-name
+
+"""
+Testing for the pass that annotates used memory for each primitive
+Relay function.
+"""
+
+import tvm
+from tvm import relay
+from tvm.relay.expr_functor import ExprVisitor
+
+
+def AnnotateUsedMemory():
+    return relay.transform._ffi_api.AnnotateUsedMemory()
+
+
+class CheckUsedMemoryAnnotation(ExprVisitor):
+    """
+    Check that the annotations on each function in the graph match
+    what is expected.
+    """
+
+    def __init__(self, expected_annotations):
+        self.expected_annotations = expected_annotations
+        super().__init__()
+
+    def visit_function(self, fn):
+        if "Primitive" in fn.attrs:
+            assert (
+                "used_memory" in fn.attrs
+            ), "Primitive function does not have used_memory annotation."
+
+            assert len(self.expected_annotations) > 0, "Not all expected annotations were compared"
+
+            expected_mem = self.expected_annotations.pop(0)
+            actual_mem = fn.attrs["used_memory"]
+            assert expected_mem == actual_mem, (
+                f"Expected used memory annotation {expected_mem} "
+                f"did not match actual annotation {actual_mem}"
+            )
+        super().visit_function(fn)
+
+
+def _check_used_memory_annotations(mod, expected_annotations):
+    mod = relay.transform.InferType()(mod)
+    mod = relay.transform.ToANormalForm()(mod)
+    mod = relay.transform.InferType()(mod)
+    mod = AnnotateUsedMemory()(mod)
+
+    CheckUsedMemoryAnnotation(expected_annotations).visit(mod["main"].body)
+
+
+def _create_primitive_function(expr):
+    func = relay.Function(relay.analysis.free_vars(expr), expr)
+    func = func.with_attr("Primitive", 1)
+    return func
+
+
+def test_simple():
+    """
+    Test simple graph with one primitive function.
+    """
+
+    def get_inner_func():
+        x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
+        x = relay.nn.max_pool2d(x)
+        x = _create_primitive_function(x)
+        return x
+
+    ifm = relay.var("input", shape=(1, 2, 2, 4), dtype="int8")
+    call = relay.Call(get_inner_func(), [ifm])
+    mod = tvm.IRModule.from_expr(call)
+
+    expected_annotations = [2 * (1 * 2 * 2 * 4)]
+    _check_used_memory_annotations(mod, expected_annotations)
+
+
+def test_multiple_functions():
+    """
+    Test a graph with multiple primitive functions.
+    """
+
+    def get_inner_func(ifm_shape):
+        x = relay.var("x", shape=ifm_shape, dtype="int8")
+        x = relay.nn.max_pool2d(x, pool_size=(2, 2), layout="NHWC")
+        x = _create_primitive_function(x)
+        return x
+
+    ifm = relay.var("input", shape=(1, 8, 8, 2), dtype="int8")
+    x = get_inner_func((1, 8, 8, 2))
+    x = relay.Call(x, [ifm])
+    y = get_inner_func((1, 7, 7, 2))
+    y = relay.Call(y, [x])
+    z = get_inner_func((1, 6, 6, 2))
+    z = relay.Call(z, [y])
+    mod = tvm.IRModule.from_expr(z)
+
+    expected_annotations = [
+        (1 * 8 * 8 * 2) + (1 * 7 * 7 * 2),
+        (1 * 7 * 7 * 2) + (1 * 6 * 6 * 2),
+        (1 * 6 * 6 * 2) + (1 * 5 * 5 * 2),
+    ]
+    _check_used_memory_annotations(mod, expected_annotations)
+
+
+def test_mixed_data_types():
+    """
+    Test a graph with a primitive function that has mixed datatypes.
+    """
+
+    def get_inner_func():
+        x = relay.var("x", shape=(1, 2, 2, 2), dtype="int16")
+        x = relay.cast(x, dtype="uint32")
+        x = _create_primitive_function(x)
+        return x
+
+    ifm = relay.var("input", shape=(1, 2, 2, 2), dtype="int16")
+    x = get_inner_func()
+    x = relay.Call(x, [ifm])
+    mod = tvm.IRModule.from_expr(x)
+
+    expected_annotations = [
+        (1 * 2 * 2 * 2) * 2 + (1 * 2 * 2 * 2) * 4,
+    ]
+    _check_used_memory_annotations(mod, expected_annotations)
+
+
+def test_parallel_function_call():

Review Comment:
   A few suggestions for more test cases:
   1) Nested branches (a sketch of this one follows below).
   2) Long branches (>3) where each/some branch has more than one operator.
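   To make the first suggestion concrete, here is a sketch of a nested-branches test reusing the helpers already in this file (`_create_primitive_function`, `_check_used_memory_annotations`). The shapes are arbitrary, and the expected annotations are hand-derived on the assumption that each callsite counts its inputs, its output, and every other tensor still live, so they should be verified against the pass:

```python
def test_nested_branches():
    """
    Sketch: branch off the first function's output, then branch again
    off one of those branches.
    """

    def get_inner_func():
        x = relay.var("x", shape=(1, 4, 4, 2), dtype="int8")
        x = relay.nn.relu(x)  # shape preserving, so every tensor is 1 * 4 * 4 * 2 = 32 bytes
        return _create_primitive_function(x)

    ifm = relay.var("input", shape=(1, 4, 4, 2), dtype="int8")
    a = relay.Call(get_inner_func(), [ifm])
    b = relay.Call(get_inner_func(), [a])  # first branch off `a`
    c = relay.Call(get_inner_func(), [a])  # second branch off `a`...
    d = relay.Call(get_inner_func(), [c])  # ...which itself branches again
    e = relay.Call(get_inner_func(), [c])
    out = relay.concatenate([b, d, e], axis=0)
    mod = tvm.IRModule.from_expr(out)

    # Hand-derived: each entry is (tensors live at the callsite) * 32 bytes.
    expected_annotations = [
        2 * (1 * 4 * 4 * 2),  # a: ifm and a
        2 * (1 * 4 * 4 * 2),  # b: a and b
        3 * (1 * 4 * 4 * 2),  # c: a and c, plus b kept live for the concatenate
        3 * (1 * 4 * 4 * 2),  # d: c and d, plus b
        4 * (1 * 4 * 4 * 2),  # e: c and e, plus b and d
    ]
    _check_used_memory_annotations(mod, expected_annotations)
```

   The long-branches case would follow the same pattern, with each `get_inner_func` stacking two or more operators before `_create_primitive_function` is applied.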
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]