yangjunpro commented on a change in pull request #5962:
URL: https://github.com/apache/incubator-tvm/pull/5962#discussion_r449937406



##########
File path: python/tvm/ansor/workload_registry.py
##########
@@ -0,0 +1,170 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""
+Workload registration and serialization.
+
+We use a json string to represent a workload (a compute dag).
+The format of the string is `[func_name, [args...]]`.
+The dag should be the return value of this `func_name(*args)`.
+
+Rationale: The workload is actually a compute dag defined by tvm dsl. But 
serializing compute dags
+and matching them efficiently is not easy. Therefore, we use the above string 
to encode a compute
+dag.
+These strings are efficient for serialization/matching and wont' be too long.
+When we need the dag, we decode the string and call the function, which will 
return the dag.
+"""
+
+import pickle
+import json
+
+import tvm._ffi
+from .utils import serialize_args, deserialize_args
+
+WORKLOAD_FUNC_REGISTRY = {}
+
+
+def register_workload_by_func(func):
+    """ Register a workload by generation function.

Review comment:
       So in the comments "generator function" may be less ambiguous:)

##########
File path: python/tvm/ansor/workload_registry.py
##########
@@ -0,0 +1,170 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""
+Workload registration and serialization.
+
+We use a json string to represent a workload (a computation graph).
+The format of the string is `[func_name, [args...]]`.
+The dag should be the return value of this `func_name(*args)`.
+
+Rationale: The workload is actually a compute dag defined by tvm dsl. But 
serializing compute dags
+and matching them efficiently is not easy. Therefore, we use the above string 
to encode a compute
+dag.
+These strings are efficient for serialization/matching and wont' be too long.
+When we need the dag, we decode the string and call the function, which will 
return the dag.
+"""
+
+import pickle
+import json
+
+import tvm._ffi
+from .utils import serialize_args, deserialize_args
+
+WORKLOAD_FUNC_REGISTRY = {}
+
+
+def register_workload(func):
+    """ Register a workload by generation function.
+
+    The input function should take hashable and jsonable arguments
+    (int, float, tuple of int, tvm.tensor.Tensor, ...) and return a list of 
tvm.tensor.Tensor.
+
+    Parameters
+    ----------
+    func : Function
+        The generation function that returns the compute declaration Tensors.
+
+    Examples
+    --------
+    @ansor.register_workload
+    def matmul(N, M, K):
+        A = te.placeholder((N, K), name='A')
+        B = te.placeholder((K, M), name='B')
+        k = te.reduce_axis((0, K), name='k')
+        C = te.compute((N, M), lambda i, j: tvm.sum(A[i][k] * B[k][j], 
axis=[k]), name='C')
+        return [A, B, C]
+    """
+    assert callable(func)
+    func_name = func.__name__
+    if func_name in WORKLOAD_FUNC_REGISTRY:
+        raise RuntimeError('%s has been registered already' % func_name)
+
+    WORKLOAD_FUNC_REGISTRY[func_name] = func
+    return func
+
+
+def make_workload_key(func, args):
+    """ make a workload key from function and arguments.
+
+    Parameters
+    ----------
+    func : Union[Function, str]
+        The function that returns the compute declaration Tensors.
+        Can be the a function or the function name.
+    args : Args
+        The args of the function.
+
+    Returns
+    -------
+    workload_key : Str
+        The workload key of the function.
+    """
+    if callable(func):
+        func_name = func.__name__
+    elif isinstance(func, str):
+        func_name = func
+    else:
+        raise ValueError("Invalid function: " + str(func))
+
+    if not func_name in WORKLOAD_FUNC_REGISTRY:
+        raise ValueError("%s is not registered. "  % func,
+                         "Please register it with @ansor.register_workload")
+
+    args = serialize_args(args)
+
+    return json.dumps((func_name,) + args)
+
+
+def decode_workload_key_to_func_args(workload_key):
+    """ Decode a workload key to the registerd function name and its 
corresponding args.
+
+    Parameters
+    ----------
+    workload_key : str
+        The input workload key.
+
+    Returns
+    -------
+    name : str
+        The function name of this workload key.
+    args : List[Tensor]
+        The args of the generation function.
+    """
+    workload = json.loads(workload_key)
+    if not workload[0] in WORKLOAD_FUNC_REGISTRY:
+        raise ValueError("%s is not registered. " % workload[0] +
+                         "Please register it with @ansor.register_workload")
+    return workload[0], deserialize_args(workload[1:])
+
+
+@tvm._ffi.register_func("ansor.workload_key_to_tensors")
+def workload_key_to_tensors(workload_key):
+    """ Get the input/output tensors from the workload key.
+
+    This method is usually used to create a ComputeDAG by workload key.
+
+    Parameters
+    ----------
+    workload_key : str
+        The input workload key.
+
+    Returns
+    -------
+    tensors : List[Tensor]
+        The registered compute declaration Tensors.
+    """
+    name, args = decode_workload_key_to_func_args(workload_key)
+    lookup = WORKLOAD_FUNC_REGISTRY[name]
+    assert callable(lookup)
+    return lookup(*args)
+
+
+def save_workload_func_registry(filename):
+    """ Dump workload function registry to a pickle binary file.
+
+    Parameters
+    ----------
+    filename : str
+        The filename to dump workload function registry to.
+    """
+    global WORKLOAD_FUNC_REGISTRY

Review comment:
       Why do we only have an explicit _global_ here? 

##########
File path: src/ansor/auto_schedule.cc
##########
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file ansor/auto_schedule.cc
+ * \brief The user interface of the Ansor auto-scheduler.
+ */
+
+#include "auto_schedule.h"
+
+#include <tvm/runtime/registry.h>
+
+namespace tvm {
+namespace ansor {
+
+TVM_REGISTER_NODE_TYPE(TuningOptionsNode);
+
+TuningOptions::TuningOptions(int num_measure_trials, int early_stopping, int 
num_measures_per_round,
+                             int verbose, ProgramBuilder builder, 
ProgramRunner runner,
+                             Array<MeasureCallback> measure_callbacks,
+                             Array<SearchCallback> pre_search_callbacks) {
+  auto node = make_object<TuningOptionsNode>();

Review comment:
       Why not directly assign _data__ with _make_object<TuningOptionsNode>()_? 

##########
File path: python/tvm/ansor/workload_registry.py
##########
@@ -0,0 +1,170 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""
+Workload registration and serialization.
+
+We use a json string to represent a workload (a computation graph).
+The format of the string is `[func_name, [args...]]`.
+The dag should be the return value of this `func_name(*args)`.
+
+Rationale: The workload is actually a compute dag defined by tvm dsl. But 
serializing compute dags
+and matching them efficiently is not easy. Therefore, we use the above string 
to encode a compute
+dag.
+These strings are efficient for serialization/matching and wont' be too long.
+When we need the dag, we decode the string and call the function, which will 
return the dag.
+"""
+
+import pickle
+import json
+
+import tvm._ffi
+from .utils import serialize_args, deserialize_args
+
+WORKLOAD_FUNC_REGISTRY = {}
+
+
+def register_workload(func):
+    """ Register a workload by generation function.
+
+    The input function should take hashable and jsonable arguments
+    (int, float, tuple of int, tvm.tensor.Tensor, ...) and return a list of 
tvm.tensor.Tensor.
+
+    Parameters
+    ----------
+    func : Function
+        The generation function that returns the compute declaration Tensors.
+
+    Examples
+    --------
+    @ansor.register_workload
+    def matmul(N, M, K):
+        A = te.placeholder((N, K), name='A')
+        B = te.placeholder((K, M), name='B')
+        k = te.reduce_axis((0, K), name='k')
+        C = te.compute((N, M), lambda i, j: tvm.sum(A[i][k] * B[k][j], 
axis=[k]), name='C')
+        return [A, B, C]
+    """
+    assert callable(func)
+    func_name = func.__name__
+    if func_name in WORKLOAD_FUNC_REGISTRY:
+        raise RuntimeError('%s has been registered already' % func_name)
+
+    WORKLOAD_FUNC_REGISTRY[func_name] = func

Review comment:
       Do we need to explicitly add a _global WORKLOAD_FUNC_REGISTRY_ statement?

##########
File path: src/ansor/auto_schedule.cc
##########
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file ansor/auto_schedule.cc
+ * \brief The user interface of the Ansor auto-scheduler.
+ */
+
+#include "auto_schedule.h"
+
+#include <tvm/runtime/registry.h>
+
+namespace tvm {
+namespace ansor {
+
+TVM_REGISTER_NODE_TYPE(TuningOptionsNode);
+
+TuningOptions::TuningOptions(int num_measure_trials, int early_stopping, int 
num_measures_per_round,
+                             int verbose, ProgramBuilder builder, 
ProgramRunner runner,
+                             Array<MeasureCallback> measure_callbacks,
+                             Array<SearchCallback> pre_search_callbacks) {
+  auto node = make_object<TuningOptionsNode>();
+  node->num_measure_trials = num_measure_trials;
+  node->early_stopping = early_stopping;
+  node->num_measures_per_round = num_measures_per_round;
+  node->verbose = verbose;
+  node->builder = std::move(builder);
+  node->runner = std::move(runner);
+  node->measure_callbacks = std::move(measure_callbacks);
+  node->pre_search_callbacks = std::move(pre_search_callbacks);
+  data_ = std::move(node);
+}
+
+std::pair<te::Schedule, Array<te::Tensor> > AutoSchedule(SearchTask task,
+                                                         SearchPolicy 
search_policy,
+                                                         TuningOptions 
tuning_options) {
+  // Create a ProgramMeasurer to handle the schedule build and performance 
measure
+  ProgramMeasurer measurer =
+      ProgramMeasurer(tuning_options->builder, tuning_options->runner,
+                      tuning_options->measure_callbacks, 
tuning_options->verbose);
+  // Search for the best schedule
+  State state = search_policy->Search(
+      task, tuning_options->num_measure_trials, tuning_options->early_stopping,

Review comment:
       I think one reason is that _tuning_options_ contains more than what the 
ProgramMeasurer constructor needs, so they chose to expose as little as 
possible between different modules? 
   
   I am also wondering whether it is better to pack those lengthy things into a 
single holder to avoid the bulky argument pass-in.




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to