echuraev commented on code in PR #13837:
URL: https://github.com/apache/tvm/pull/13837#discussion_r1087810934


##########
python/tvm/relay/op/contrib/clml.py:
##########
@@ -387,3 +393,769 @@ def __exit__(self, ptype, value, trace):
         self.op.reset_attr(self.attr_key)
         if self.older_attr:
             self.op.set_attr(self.attr_key, self.older_attr)
+
+
+class CLMLGetSubModuleSrc:
+    """Generates CLML API one CLML sub module out of global TVM module"""
+
+    def __init__(self, cmod):
+        """Initialize
+        Parameters
+        ----------
+        cmod : Module
+            The CLML sub module from TVM module
+        """
+        self.cmod = cmod
+        self.codegen = None
+        self.nodes = None
+        self.node_map = {}
+        self.input_meta = []
+        self.output_meta = []
+        self.clml_code = []
+        self.sub_module_name = None
+
+        self.MakeCLMLTensor = Template(
+            """auto $name = runner.MakeCLMLTensor
+        (std::vector<size_t>({$shape}), "$dtype", $layout);"""
+        )
+        self.MapInsert = Template("""runner.storage_map.insert({"$nid", $tensor_desc});""")
+        self.MakeConv2D = Template(
+            """
+        // Convolution / Depthwise Convolution
+        runner.MakeConv2D($input_tensor,
+           $weight_tensor,
+           $bias_tensor,
+           $output_tensor,
+           std::vector<cl_uint>({$padding}),
+           std::vector<cl_uint>({$dilation}),
+           std::vector<cl_uint>({$strides}),
+           $groups,
+           $mode,
+           $activation,
+           $has_bias,
+           $has_act,
+           "$dtype");"""
+        )
+        self.MakeConv2DWithBN = Template(
+            """
+        // Batchnorm
+        runner.MakeConv2DWithBN($input_tensor,
+                 $weight_tensor,
+                 $bias_tensor,
+                 $output_tensor,
+                 $bn_scale_tensor,
+                 $bn_bias_tensor,
+                 $bn_mean_tensor,
+                 $bn_var_tensor,
+                 std::vector<float>  ({$bn_attrs}),
+                 std::vector<cl_uint> ({$padding}),
+                 std::vector<cl_uint> ({$dilation}),
+                 std::vector<cl_uint> ({$strides}),
+                 $groups,
+                 $mode,
+                 $activation,
+                 $has_bias,
+                 $has_act,
+                 "$dtype");"""
+        )
+        self.MakeRelu = Template(
+            """
+        // Relu / Relu6
+        runner.MakeRelu($input_tensor, $output_tensor, $relu_type, "$dtype");
+        """
+        )
+        self.MakeBN = Template(
+            """
+        // Batchnorm
+        runner.MakeBatchNorm($input_tensor,
+              $output_tensor,
+              $bn_scale_tensor,
+              $bn_bias_tensor,
+              $bn_mean_tensor,
+              $bn_var_tensor,
+              std::vector<float> ({$bn_attrs}), "$dtype");"""
+        )
+        self.MakePool2D = Template(
+            """
+        // Pool2D
+        runner.MakePool2D($input_tensor,
+           $output_tensor,
+           std::vector<cl_uint> ({$pool_size}),
+           std::vector<cl_uint> ({$strides}),
+           std::vector<cl_uint> ({$padding}),
+           "$pool_type", "$dtype");"""
+        )
+        self.MakeGlobalPool2D = Template(
+            """
+        // GlobalPool2D
+        runner.MakeGlobalPool2D($input_tensor,
+                 $output_tensor,
+                 std::vector<cl_uint> ({$in_shape}),
+                 "$pool_type", "$dtype");"""
+        )
+        self.MakeReshape = Template(
+            """
+        // Reshape
+        runner.MakeReshape($input_tensor,
+            $output_tensor, "$dtype");"""
+        )
+        self.MakeConcatenate = Template(
+            """
+        // Concatenate
+        runner.MakeConcatenate(
+                std::vector<std::shared_ptr<cl_ml_tensor_memory_desc_qcom>> ({$in_list}),
+                $output_tensor,
+                $axis, "$dtype");"""
+        )
+        self.MakeDense = Template(
+            """
+        // Dense
+        runner.MakeDense($input_tensor,
+          $weight_tensor,
+          $output_tensor,
+          $bias_tensor, "$dtype");"""
+        )
+        self.MakeSoftMax = Template(
+            """
+        // Softmax
+        runner.MakeSoftMax($input_tensor,
+            $output_tensor, "$dtype");"""
+        )
+        self.MakePad = Template(
+            """
+        // Pad
+        runner.MakePad($input_tensor,
+        $output_tensor,
+        "$pad_mode",
+        std::vector<cl_uint> ({$padding}), "$dtype");"""
+        )
+        self.MakeBatchFlatten = Template(
+            """
+        // BatchFlatten
+        runner.MakeBatchFlatten($input_tensor,
+                 $output_tensor, "$dtype");"""
+        )
+        self.MakeClip = Template(
+            """
+        // Clip
+        runner.MakeClip($input_tensor,
+         $output_tensor,
+         $a_max,
+         $a_min,
+         "$dtype");"""
+        )
+        self.MakeBinaryOp = Template(
+            """
+        // BinaryOp
+        runner.MakeBinaryOp($input_a,
+             $input_b,
+             $output_tensor,
+             "$op", "$dtype");"""
+        )
+
+        self.MakeHeader = Template(
+            """
+        CLMLRunner $module(std::string name,
+                   ToolArgs& args,
+                   cl_platform_id arg_platform_id,
+                   cl_context arg_context,
+                   cl_device_id arg_device_id,
+                   cl_command_queue arg_queue) {
+        CLMLRunner runner = CLMLRunner(name,
+                                 args,
+                                 arg_platform_id,
+                                 arg_context,
+                                 arg_device_id,
+                                 arg_queue);
+        runner.MakeUnusedTensor();
+        """
+        )
+
+        self.MakeFooter = Template(
+            """
+            return runner;
+        }
+        """
+        )
+
+        self.MakeMetaInfo = Template(
+            "runner.SetMetaInfo("
+            '"Subgraph Name: $name\\n    Input Count  : $input_count\\n'
+            "    Output Count : $output_count\\n"
+            '    Input MetaInfo\\n$input_meta\\n    Output MetaInfo\\n$output_meta");'
+        )
+
+        self.MakeInputMetaInfo = Template(
+            "        Input: $in_name\\n            Dtype : $dtype\\n            Shape : [$shape]"
+        )
+
+        self.MakeOutputMetaInfo = Template(
+            "        Output: $out_name\\n            Dtype : $dtype\\n            Shape : [$shape]"
+        )
+
+    def get_src(self):
+        """Returns pair of sub module name and the generated source"""
+
+        self.codegen = json.loads(self.cmod.get_source("json"))
+        self.sub_module_name = self.codegen["symbol"]
+        self.nodes = self.codegen["nodes"]
+        self.clml_code.append(self.MakeHeader.substitute(module=self.sub_module_name))
+
+        def get_tensor_from_map(
+            node_seq, shape=None, layout="CL_TENSOR_LAYOUT_OPTIMAL_QCOM", dtype="float32"
+        ):
+            if node_seq in self.node_map:
+                return self.node_map[node_seq]
+            else:
+                node = self.nodes[node_seq]
+                dtype = str(node["attrs"]["dtype"][0][0])
+                if shape is None:
+                    shape = str(tuple(node["attrs"]["shape"][0][0]))[1:-1]

Review Comment:
   Thank you for the clarification.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to