This is an automated email from the ASF dual-hosted git repository.

junrushao pushed a commit to branch unity
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/unity by this push:
     new 5a44262502 [Unity] Fix memory statistics issues in estimate_memory_usage (#15224)
5a44262502 is described below

commit 5a4426250236ea1df4765ddb48c990c82b773776
Author: Yixin Dong <[email protected]>
AuthorDate: Wed Jul 5 14:22:07 2023 +0800

    [Unity] Fix memory statistics issues in estimate_memory_usage (#15224)
    
    Previously `tvm.relax.analysis.estimate_memory_usage` will ignore 
`R.builtin.alloc_tensor` operators when calculating memory usage of the module 
**after** memory planning. However, such operators indeed increases the memory 
usage.
    
    This PR will take such operators into account so as to get correct memory 
statistics.
---
 python/tvm/relax/analysis/estimate_memory_usage.py | 28 +++++++++++++++++-----
 .../relax/test_analysis_estimate_memory_usage.py   |  5 ++--
 2 files changed, 25 insertions(+), 8 deletions(-)

diff --git a/python/tvm/relax/analysis/estimate_memory_usage.py b/python/tvm/relax/analysis/estimate_memory_usage.py
index 014a8e0d49..fa1b6eda04 100644
--- a/python/tvm/relax/analysis/estimate_memory_usage.py
+++ b/python/tvm/relax/analysis/estimate_memory_usage.py
@@ -93,7 +93,7 @@ def estimate_memory_usage(mod: Union[IRModule, Function]) -> str:
 
                 self.cleanup()
                 self.visit_expr(func)
-                estimation += self.generate_est_string(global_var.name_hint)
+                estimation += self.generate_est_string(global_var.name_hint) + "\n"
 
             if estimation != "":
                 estimation = "Memory usage estimation:\n" + estimation
@@ -108,13 +108,15 @@ def estimate_memory_usage(mod: Union[IRModule, Function]) -> str:
 
         def visit_call_(self, call: Call) -> None:  # pylint: disable=arguments-differ
             if call.op == self.builtin_alloc_tensor_op:
-                self.accumulate_tensor_alloc(shape=call.args[0], dtype_str=call.args[1].value)
+                self.accumulate_builtin_tensor_alloc(
+                    shape=call.args[0], dtype_str=call.args[1].value
+                )
             elif call.op == self.memory_alloc_tensor_op:
                 self.accumulate_tensor_alloc(shape=call.args[2], dtype_str=call.args[3].value)
             elif call.op == self.memory_alloc_storage_op:
                 self.accumulate_storage_alloc(size=call.args[0])
 
-        def accumulate_tensor_alloc(self, shape: Expr, dtype_str: str) -> None:
+        def calculate_size(self, shape: Expr, dtype_str: str) -> int:
             if not isinstance(shape, ShapeExpr):
                 raise TypeError(
                     "The shape of relax.builtin.alloc_tensor and "
@@ -124,12 +126,26 @@ def estimate_memory_usage(mod: Union[IRModule, Function]) -> str:
             for dim_len in shape.values:
                 if not isinstance(dim_len, tvm.tir.IntImm):
                     self.total_dyn_size_tensor_num += 1
-                    return
+                    return -1
                 size *= dim_len.value
-
             dtype = tvm.DataType(dtype_str)
+            return size * ((dtype.bits + 7) // 8) * dtype.lanes
+
+        def accumulate_builtin_tensor_alloc(self, shape: Expr, dtype_str: str) -> None:
+            size = self.calculate_size(shape, dtype_str)
+            if size == -1:
+                return
+            self.total_const_size_tensor_num += 1
+            self.total_alloc_tensor_mem += size
+            self.planned_mem_num += 1
+            self.planned_alloc_mem += size
+
+        def accumulate_tensor_alloc(self, shape: Expr, dtype_str: str) -> None:
+            size = self.calculate_size(shape, dtype_str)
+            if size == -1:
+                return
             self.total_const_size_tensor_num += 1
-            self.total_alloc_tensor_mem += (size * dtype.bits * dtype.lanes + 7) // 8
+            self.total_alloc_tensor_mem += size
 
         def accumulate_storage_alloc(self, size: Expr) -> None:
             if not isinstance(size, ShapeExpr):
diff --git a/tests/python/relax/test_analysis_estimate_memory_usage.py b/tests/python/relax/test_analysis_estimate_memory_usage.py
index 32bb56a670..31419b544d 100644
--- a/tests/python/relax/test_analysis_estimate_memory_usage.py
+++ b/tests/python/relax/test_analysis_estimate_memory_usage.py
@@ -117,8 +117,9 @@ def test_basic():
         == r"""Memory usage estimation:
 - Function main:
  * Without memory planning, there are 5 constant-size memory allocation(s) with total size 1.639e-07 GB.
- * With memory planning, there are 2 constant-size memory allocation(s) with total size 6.706e-08 GB.
- * Memory planning reduces constant memory size to 40.9%."""
+ * With memory planning, there are 3 constant-size memory allocation(s) with total size 1.043e-07 GB.
+ * Memory planning reduces constant memory size to 63.6%.
+"""
     )
 
 

Reply via email to