bartekkuncer commented on code in PR #21106:
URL: https://github.com/apache/incubator-mxnet/pull/21106#discussion_r931897587


##########
src/operator/nn/dnnl/dnnl_masked_softmax.cc:
##########
@@ -31,12 +31,16 @@ namespace op {
 // Support for https://oneapi-src.github.io/oneDNN/v2.6/dev_guide_softmax.html
bool SupportDNNLMaskedSoftmax(const MaskedSoftmaxParam& param, const std::vector<NDArray>& inputs) {
   CHECK_EQ(inputs.size(), 2);
+  const auto data = inputs[0];
   const auto mask = inputs[1];
   SoftmaxParam softmax_param;
   softmax_param.axis        = param.axis;
-  softmax_param.dtype       = inputs[0].dtype();
+  softmax_param.dtype       = data.dtype();
   softmax_param.temperature = param.temperature;
-  return mask.dtype() == mshadow::kBool && SupportDNNLSoftmax(softmax_param, inputs[0]);
+  // threshold value selected experimentally based on performance results - PR-21106
+  const size_t optimal_size_threshold = 2 << 13;

Review Comment:
   ```suggestion
     constexpr size_t optimal_size_threshold = 2 << 13;
   ```
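Using `constexpr` here makes explicit that the threshold is a compile-time constant rather than a runtime `const`, which better documents a fixed tuning value. For context, below is a minimal sketch of how such a size threshold typically gates the oneDNN dispatch; the condition actually added by PR-21106 is elided from the hunk above, so the comparison direction and the `Sketch` helper name are assumptions for illustration only.

```cpp
// Hypothetical sketch only - not the exact code from PR-21106 (the final
// return condition is elided from the hunk above). It illustrates dispatching
// to oneDNN only when the tensor is large enough for the primitive setup
// overhead to pay off.
constexpr size_t optimal_size_threshold = 2 << 13;  // 16384 elements (assumed unit)

bool SupportDNNLMaskedSoftmaxSketch(const MaskedSoftmaxParam& param,
                                    const std::vector<NDArray>& inputs) {
  CHECK_EQ(inputs.size(), 2);
  const auto& data = inputs[0];
  const auto& mask = inputs[1];
  SoftmaxParam softmax_param;
  softmax_param.axis        = param.axis;
  softmax_param.dtype       = data.dtype();
  softmax_param.temperature = param.temperature;
  return data.shape().Size() > optimal_size_threshold &&  // assumed comparison
         mask.dtype() == mshadow::kBool && SupportDNNLSoftmax(softmax_param, data);
}
```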



##########
src/operator/nn/dnnl/dnnl_binary.cc:
##########
@@ -65,7 +65,11 @@ void DNNLBinaryOpFwd::Execute(const std::vector<NDArray>& inputs,
 
 // Support for https://oneapi-src.github.io/oneDNN/v2.6/dev_guide_binary.html
 bool SupportDNNLBinary(const std::vector<NDArray>& inputs) {
-  return SupportDNNL<DNNLTypeMode::FloatTypes>(inputs[1]) &&
+  // threshold value selected experimentally based on performance results - PR-21106
+  const size_t optimal_size_threshold = 2 << 13;

Review Comment:
   ```suggestion
     constexpr size_t optimal_size_threshold = 2 << 13;
   ```
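The same reasoning applies here: the value is known at compile time, so `constexpr` both documents that it is a fixed tuning constant and guarantees compile-time evaluation. For reference, `2 << 13` evaluates to 16384; the small self-contained snippet below confirms this (the header and standalone translation unit are assumptions, not part of the PR).

```cpp
#include <cstddef>

// constexpr guarantees a compile-time constant; a plain const only promises
// the value is not modified at runtime.
constexpr std::size_t optimal_size_threshold = 2 << 13;
static_assert(optimal_size_threshold == 16384, "2 << 13 == 2^14 == 16384");
```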



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
