cconvey commented on code in PR #11490:
URL: https://github.com/apache/tvm/pull/11490#discussion_r883758337


##########
tests/python/contrib/test_hexagon/benchmark_elemwise_add.py:
##########
@@ -0,0 +1,468 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import os
+import os.path
+import sys
+import pytest
+import numpy as np
+import logging
+import tempfile
+
+import tvm.testing
+import tvm.script
+from tvm.script import tir as T
+from tvm import te
+from tvm.contrib.hexagon.build import HexagonLauncherRPC
+from .benchmark_util import BenchmarksTable
+
+RPC_SERVER_PORT = 7070
+
+# This is a fixed detail of the v68 architecture.
+HVX_VECTOR_BYTES = 128
+
+# NOTE on server ports:
+# These tests use different port numbers for the RPC server (7070 + ...).
+# The reason is that an RPC session cannot be gracefully closed without
+# triggering TIME_WAIT state on the server socket. This prevents another
+# server from binding to the same port until the wait time elapses.
+
+_BT = BenchmarksTable()
+
+_CSV_COLUMN_ORDER = [
+    # Identifies which TE-compute / TIRScript is used as the basis for the
+    # benchmarked primfunc. Only needs to be meaningful to humans.
+    "basic_kernel",
+    # The tensors' element type
+    "dtype",
+    # When applicable, indicates the particular variation of schedules
+    # applied by the Python code. Decoding this may require looking at this
+    # script's source code.
+    "sched_type",
+    # The memory location of the tensors used during the execution of
+    # the primfunc.  We currently assume just one location.
+    # This will likely need to be generalized as we add more sophisticated
+    # primfuncs.
+    "mem_scope",
+    # For primfuncs that treat tensor buffers as collections of 1D vectors,
+    # this is the number of vectors in each tensor.
+    # This will likely need to be generalized as we add more sophisticated
+    # primfuncs.
+    "num_vectors_per_tensor",
+    # Reserved columns defined by the BenchmarksTable class.
+    "row_status",
+    "timings_min_usecs",
+    "timings_max_usecs",
+    "timings_median_usecs",
+    "timings_mean_usecs",
+    "timings_stddev_usecs",
+    # For benchmarks that produce files on the host file system, this indicates
+    # their location. Useful for post-mortem investigation of benchmark results.
+    "host_files_dir",
+    # Miscellaneous comments about the benchmark.
+    "comments",
+]
+
+_HOST_OUTPUT_DIR = tempfile.mkdtemp()
+
+print("-" * 80)
+print("OUTPUT DIRECTORY: {}".format(_HOST_OUTPUT_DIR))
+print("-" * 80)
+print()
+
+
+def _get_benchmark_id(keys_dict):
+    """
+    Given a dictionary with the distinguishing characteristics of a particular benchmark
+    line item, compute a string that uniquely identifies the benchmark.
+
+    The returned string:
+    - is a valid directory name on the host's file system
+    - should be easy for humans to parse
+
+    Note that the insertion order for `keys_dict` does affect the computed name.
+    """
+    return "-".join([f"{k}:{v}" for k, v in keys_dict.items()])
+
+
+def _get_benchmark_decription(keys_dict):
+    """
+    Similar to `_get_benchmark_id`, but the focus is on human-readability.
+
+    The returned string contains no line-breaks, but may contain spaces and
+    other characters that make it unsuitable for use as a filename.
+    """
+    return " ".join([f"{k}={v}" for k, v in keys_dict.items()])
+
+
[email protected]_hexagon
+def test_elemwise_add_tvmcript(hexagon_launcher: HexagonLauncherRPC):
+    """
+    Similar to `test_elemwise_add_te`, but starting with TensorScript rather than
+    Tensor Expressions.
+    """
+
+    # Create and benchmark a single primfunc.
+    # If an unexpected problem occurs, raise an exception.  Otherwise add a row of output to 'bt'.
+    def test_one_config(dtype, mem_scope, num_vectors_per_tensor):

Review Comment:
   IIUC, you're suggesting that we divide `test_one_config(...)` into two parts:
   - part 1: returns whatever `tvm.build(...)` produces.
   - part 2: executes / benchmarks whatever part1 produced.
   
   And while part 1 would have separate implementations for the TE-compute vs. TIR-script primfuncs, part 2 would only need a single implementation.  Right?



##########
tests/python/contrib/test_hexagon/benchmark_elemwise_add.py:
##########
@@ -0,0 +1,468 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import os
+import os.path
+import sys
+import pytest
+import numpy as np
+import logging
+import tempfile
+
+import tvm.testing
+import tvm.script
+from tvm.script import tir as T
+from tvm import te
+from tvm.contrib.hexagon.build import HexagonLauncherRPC
+from .benchmark_util import BenchmarksTable
+
+RPC_SERVER_PORT = 7070
+
+# This is a fixed detail of the v68 architecture.
+HVX_VECTOR_BYTES = 128
+
+# NOTE on server ports:
+# These tests use different port numbers for the RPC server (7070 + ...).
+# The reason is that an RPC session cannot be gracefully closed without
+# triggering TIME_WAIT state on the server socket. This prevents another
+# server from binding to the same port until the wait time elapses.
+
+_BT = BenchmarksTable()
+
+_CSV_COLUMN_ORDER = [
+    # Identifies which TE-compute / TIRScript is used as the basis for the
+    # benchmarked primfunc. Only needs to be meaningful to humans.
+    "basic_kernel",
+    # The tensors' element type
+    "dtype",
+    # When applicable, indicates the particular variation of schedules
+    # applied by the Python code. Decoding this may require looking at this
+    # script's source code.
+    "sched_type",
+    # The memory location of the tensors used during the execution of
+    # the primfunc.  We currently assume just one location.
+    # This will likely need to be generalized as we add more sophisticated
+    # primfuncs.
+    "mem_scope",
+    # For primfuncs that treat tensor buffers as collections of 1D vectors,
+    # this is the number of vectors in each tensor.
+    # This will likely need to be generalized as we add more sophisticated
+    # primfuncs.
+    "num_vectors_per_tensor",
+    # Reserved columns defined by the BenchmarksTable class.
+    "row_status",
+    "timings_min_usecs",
+    "timings_max_usecs",
+    "timings_median_usecs",
+    "timings_mean_usecs",
+    "timings_stddev_usecs",
+    # For benchmarks that produce files on the host file system, this indicates
+    # their location. Useful for post-mortem investigation of benchmark results.
+    "host_files_dir",
+    # Miscellaneous comments about the benchmark.
+    "comments",
+]
+
+_HOST_OUTPUT_DIR = tempfile.mkdtemp()
+
+print("-" * 80)
+print("OUTPUT DIRECTORY: {}".format(_HOST_OUTPUT_DIR))
+print("-" * 80)
+print()
+
+
+def _get_benchmark_id(keys_dict):
+    """
+    Given a dictionary with the distinguishing characteristics of a particular benchmark
+    line item, compute a string that uniquely identifies the benchmark.
+
+    The returned string:
+    - is a valid directory name on the host's file system
+    - should be easy for humans to parse
+
+    Note that the insertion order for `keys_dict` does affect the computed name.
+    """
+    return "-".join([f"{k}:{v}" for k, v in keys_dict.items()])
+
+
+def _get_benchmark_decription(keys_dict):
+    """
+    Similar to `_get_benchmark_id`, but the focus is on human-readability.
+
+    The returned string contains no line-breaks, but may contain spaces and
+    other characters that make it unsuitable for use as a filename.
+    """
+    return " ".join([f"{k}={v}" for k, v in keys_dict.items()])
+
+
[email protected]_hexagon
+def test_elemwise_add_tvmcript(hexagon_launcher: HexagonLauncherRPC):
+    """
+    Similar to `test_elemwise_add_te`, but starting with TensorScript rather than
+    Tensor Expressions.
+    """
+
+    # Create and benchmark a single primfunc.
+    # If an unexpected problem occurs, raise an exception.  Otherwise add a row of output to 'bt'.
+    def test_one_config(dtype, mem_scope, num_vectors_per_tensor):

Review Comment:
   IIUC, you're suggesting that we divide `test_one_config(...)` into two parts:
   - part 1: returns whatever `tvm.build(...)` produces.
   - part 2: executes / benchmarks whatever part1 produced.
   
   And while part 1 would have separate implementations for the TE-compute vs. TIR-script primfuncs, part 2 would only need a single implementation?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to