bartekkuncer commented on code in PR #21115:
URL: https://github.com/apache/incubator-mxnet/pull/21115#discussion_r944254430


##########
src/operator/subgraph/dnnl/dnnl_transformer_qk_common.h:
##########
@@ -0,0 +1,232 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef MXNET_OPERATOR_SUBGRAPH_DNNL_DNNL_TRANSFORMER_QK_COMMON_H_
+#define MXNET_OPERATOR_SUBGRAPH_DNNL_DNNL_TRANSFORMER_QK_COMMON_H_
+
+#if MXNET_USE_ONEDNN == 1
+
+#include <string>
+#include <vector>
+
+#include "operator/contrib/transformer-inl.h"
+#include "operator/numpy/np_matrix_op-inl.h"
+#include "operator/tensor/matrix_op-inl.h"
+#include "operator/subgraph/common.h"
+#include "dnnl_common.h"
+#include "dnnl_subgraph_base-inl.h"
+#include "dnnl_transformer-inl.h"
+
+namespace mxnet {
+namespace op {
+namespace qk_common {
+
+enum SelectStatusTransformerQK {
+  kFail = 0,
+  kStart,
+  kFirstSwapAx,
+  kSecondSwapAx,
+  kFirstReshape,
+  kSecondReshape,
+  kSuccess
+};
+
+// /*
+// kStart ---> kFirstSwapAx ---> kSecondSwapAx ---> kFirstReshape ---> kSecondReshape ---> kSuccess
+// OR
+// kStart ---> kFirstSwapAx ---> kSecondSwapAx ---> kFirstReshape ---> kSuccess
+// Each status except kStart is connected with kFail.
+// */
+
+enum mode { include_split = 0, without_split };
+
+inline bool CheckSwapAxisConditionsQK(const BiDirectedNode& input_node) {
+  if (input_node.outputs.size() != 1)
+    return false;
+  return CheckSwapAxisConditions(*input_node.node);
+}
+
+inline bool CheckReshapeConditionsQK(const BiDirectedNode& input_node, const index_t out_index) {
+  if (input_node.outputs.size() != 1)
+    return false;
+  return CheckReshapeConditions(*input_node.node, out_index);
+}
+
+inline bool CheckSplitConditions(const std::vector<const BiDirectedNode*>& matched_list,
+                                 const BiDirectedNode& node) {
+  const SplitParam& param = dmlc::get<SplitParam>(node.node->attrs.parsed);
+
+  if (param.axis != -1 || param.sections != 3 || param.squeeze_axis)
+    return false;
+
+  const auto first_reshape  = (*(matched_list.end() - 2))->node;
+  const auto second_reshape = (*(matched_list.end() - 1))->node;
+  if (first_reshape->op() != Op::Get("_npx_reshape") ||
+      second_reshape->op() != Op::Get("_npx_reshape")) {
+    return false;
+  }
+  // 3 sections - ensure that every output is used only once
+  if (node.outputs.size() == 3 && node.outputs.count(first_reshape) &&
+      node.outputs.count(second_reshape)) {
+    return true;
+  }
+
+  return false;
+}
+
+inline bool Select(SelectStatusTransformerQK* status,
+                   std::vector<const BiDirectedNode*>* matched_list,
+                   const BiDirectedNode& seed_node,
+                   const std::shared_ptr<NodeAttr>& node_attr) {
+  if (seed_node.node->op() == Op::Get("batch_dot")) {
+    *status = kStart;
+    matched_list->clear();
+    matched_list->push_back(&seed_node);
+    return true;
+  }
+  return false;
+}
+
+template <mode qk_mode>
+bool SelectInput(SelectStatusTransformerQK* status,
+                 std::vector<const BiDirectedNode*>* matched_list,
+                 const BiDirectedNode& n,
+                 const BiDirectedNode& input_node) {
+  if (*status == kFail || *status == kSuccess || input_node.node->is_variable())
+    return false;
+  const auto& raw_input_node = *input_node.node;
+  switch (*status) {
+    case kStart:
+      if (raw_input_node.op() == Op::Get("SwapAxis")) {
+        if (CheckSwapAxisConditionsQK(input_node)) {
+          *status = kFirstSwapAx;
+          matched_list->push_back(&input_node);
+        }
+        return true;
+      }
+      break;
+    case kFirstSwapAx:
+      if (raw_input_node.op() == Op::Get("SwapAxis")) {
+        if (CheckSwapAxisConditionsQK(input_node)) {
+          *status = kSecondSwapAx;
+          matched_list->push_back(&input_node);
+          return true;
+        }
+      }
+      break;
+    case kSecondSwapAx:
+      if (raw_input_node.op() == Op::Get("_npx_reshape")) {
+        // input to reshape must be first or second output from split
+        if (CheckReshapeConditionsQK(input_node, 0) || CheckReshapeConditionsQK(input_node, 1)) {
+          *status = kFirstReshape;
+          matched_list->push_back(&input_node);
+          return true;
+        }
+      }
+      break;
+    case kFirstReshape:
+      if (raw_input_node.op() == Op::Get("_npx_reshape")) {
+        if (CheckReshapeConditionsQK(input_node, 0) || CheckReshapeConditionsQK(input_node, 1)) {
+          if constexpr (qk_mode == mode::include_split) {
+            *status = kSecondReshape;
+          } else {
+            *status = kSuccess;
+          }
+          matched_list->push_back(&input_node);
+          return true;
+        }
+      }
+      break;
+    case kSecondReshape:
+      if (raw_input_node.op() == Op::Get("_split_v2") &&
+          CheckSplitConditions(*matched_list, input_node)) {
+        *status = kSuccess;
+        return true;
+      }
+      break;
+    default:
+      *status = kFail;
+      return false;
+  }
+  return false;
+}
+
+inline std::vector<BiDirectedNode*> Filter(const SelectStatusTransformerQK& status,
+                                           const std::vector<const BiDirectedNode*>& matched_list,
+                                           const std::vector<BiDirectedNode*>& candidates) {
+  if (status != kSuccess) {
+    return std::vector<BiDirectedNode*>(0);
+  } else {
+    std::vector<BiDirectedNode*> ret;
+    for (auto i : matched_list) {
+      auto non_const_i = const_cast<BiDirectedNode*>(i);
+      if (std::find(candidates.begin(), candidates.end(), non_const_i) != candidates.end()) {
+        ret.push_back(non_const_i);
+      }
+    }
+    return ret;
+  }
+}
+
+template <mode qk_mode>
+nnvm::ObjectPtr CreateSubgraphNode(const nnvm::Symbol& sym, const int subgraph_id = 0) {
+  std::string op_name;
+  if constexpr (qk_mode == mode::include_split) {
+    op_name = "_sg_onednn_selfatt_qk_split";
+  } else {
+    op_name = "_sg_onednn_selfatt_qk";
+  }
+  nnvm::ObjectPtr n = nnvm::Node::Create();
+  // This op has single output, remove duplicated.
+  auto last_node = sym.outputs[0].node;
+  nnvm::Symbol new_sym;
+  new_sym.outputs.emplace_back(last_node);
+  std::ostringstream node_name;
+
+  DFSVisit(new_sym.outputs, [&](const nnvm::ObjectPtr& node) {
+    if ((node->op() == Op::Get("_npx_reshape"))) {
+      auto const& reshape_param = nnvm::get<NumpyXReshapeParam>(node->attrs.parsed);
+      // set heads attribute - all necessary conditions are checked before

Review Comment:
   Inconsistent comments: some start with a capital letter and some don't;
some end with a period and some don't.
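
   For illustration, the comments in the quoted hunk normalized to one
   convention (hypothetical wording; sentence case plus a trailing period):

       // Input to reshape must be first or second output from split.
       // 3 sections - ensure that every output is used only once.
       // This op has single output, remove duplicated.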



##########
src/operator/subgraph/dnnl/dnnl_transformer.cc:
##########
@@ -86,8 +111,21 @@ static bool SgDNNLSelfAttQKInferType(const nnvm::NodeAttrs& attrs,
         << "QuantizedSelfAttentionQK only supports int8 input, while " << 
in_types->at(0)
         << " is given.";
 
-    TYPE_ASSIGN_CHECK(*in_types, 1, mshadow::kFloat32);
-    TYPE_ASSIGN_CHECK(*in_types, 2, mshadow::kFloat32);
+    if constexpr (qk_mode == qk_common::mode::without_split) {
+      if (in_types->at(1) == mshadow::kBfloat16) {
+        return false;
+      }

Review Comment:
   This check is redundant: if the program gets to this point, we have
already checked in line 100 that in_type0 == in_type1 and in line 106 that
in_type0 != bfloat16.
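
   For reference, a minimal sketch of the branch with the redundant bfloat16
   early return dropped (assuming the in_type0 == in_type1 and
   in_type0 != bfloat16 checks from lines 100 and 106 stay in place; names
   follow the quoted diff):

       if constexpr (qk_mode == qk_common::mode::without_split) {
         // in_types->at(1) already equals in_types->at(0), which was verified
         // to be int8 above, so no separate bfloat16 check is needed here.
         CHECK(in_types->at(1) == mshadow::kInt8)
             << "QuantizedSelfAttentionQK only supports int8 input, while "
             << in_types->at(1) << " is given.";
       }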



##########
src/operator/subgraph/dnnl/dnnl_transformer.cc:
##########
@@ -86,8 +111,21 @@ static bool SgDNNLSelfAttQKInferType(const nnvm::NodeAttrs& attrs,
         << "QuantizedSelfAttentionQK only supports int8 input, while " << 
in_types->at(0)
         << " is given.";
 
-    TYPE_ASSIGN_CHECK(*in_types, 1, mshadow::kFloat32);
-    TYPE_ASSIGN_CHECK(*in_types, 2, mshadow::kFloat32);
+    if constexpr (qk_mode == qk_common::mode::without_split) {
+      if (in_types->at(1) == mshadow::kBfloat16) {
+        return false;
+      }
+      CHECK(in_types->at(1) == mshadow::kInt8)
+          << "QuantizedSelfAttentionQK only supports int8 input, while " << 
in_types->at(1)
+          << " is given.";

Review Comment:
   Same as above.



##########
src/operator/subgraph/dnnl/dnnl_transformer_qk_common.h:
##########
@@ -0,0 +1,232 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#ifndef MXNET_OPERATOR_SUBGRAPH_DNNL_DNNL_TRANSFORMER_QK_COMMON_H_
+#define MXNET_OPERATOR_SUBGRAPH_DNNL_DNNL_TRANSFORMER_QK_COMMON_H_
+
+#if MXNET_USE_ONEDNN == 1
+
+#include <string>
+#include <vector>
+
+#include "operator/contrib/transformer-inl.h"
+#include "operator/numpy/np_matrix_op-inl.h"
+#include "operator/tensor/matrix_op-inl.h"
+#include "operator/subgraph/common.h"
+#include "dnnl_common.h"
+#include "dnnl_subgraph_base-inl.h"
+#include "dnnl_transformer-inl.h"
+
+namespace mxnet {
+namespace op {
+namespace qk_common {
+
+enum SelectStatusTransformerQK {
+  kFail = 0,
+  kStart,
+  kFirstSwapAx,
+  kSecondSwapAx,
+  kFirstReshape,
+  kSecondReshape,
+  kSuccess
+};
+
+// /*
+// kStart ---> kFirstSwapAx ---> kSecondSwapAx ---> kFirstReshape ---> kSecondReshape ---> kSuccess
+// OR
+// kStart ---> kFirstSwapAx ---> kSecondSwapAx ---> kFirstReshape ---> kSuccess
+// Each status except kStart is connected with kFail.
+// */
+
+enum mode { include_split = 0, without_split };

Review Comment:
   Maybe with/without would be better than include/without?
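
   For comparison, the suggested rename would read (illustrative only, not
   what the PR currently uses):

       enum mode { with_split = 0, without_split };

   which keeps the two enumerators symmetric at their use sites, e.g.
   qk_mode == mode::with_split vs. qk_mode == mode::without_split.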



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
