This is an automated email from the ASF dual-hosted git repository.

bgawrych pushed a commit to branch v1.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/v1.x by this push:
     new ca3835a  Remove compilation warning (#20854)
ca3835a is described below

commit ca3835a9371a283591fbf0ade27ef432675a7934
Author: Andrzej Kotłowski <[email protected]>
AuthorDate: Mon Jan 31 15:44:47 2022 +0100

    Remove compilation warning (#20854)
    
    * Remove compilation warning
    
    It gets rid of the compilation warning:
    'float' changes value from 2147483647 to 2147483648
    emitted under the -Wimplicit-const-int-float-conversion flag.
    
    * Apply review comment
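
For context, a minimal standalone sketch of the warning and the fix (plain C++, not taken from the MXNet sources; it assumes kInt32Range is an integral constant equal to INT32_MAX, which is what the warning text implies):

    #include <cstdint>
    #include <iostream>
    #include <limits>

    int main() {
      // Stand-in for MXNet's kInt32Range (assumed integral here).
      const int32_t kInt32Range = std::numeric_limits<int32_t>::max();

      // float has a 24-bit significand, so 2147483647 is not exactly
      // representable; the conversion rounds it up to 2147483648 (2^31).
      // Under clang's -Wimplicit-const-int-float-conversion, the implicit
      // version of the conversion triggers the warning quoted above:
      // float implicit_range = kInt32Range;

      // The explicit cast performs the same rounding but states the intent,
      // so no warning is emitted.
      float explicit_range = static_cast<float>(kInt32Range);

      std::cout.precision(10);
      std::cout << explicit_range << '\n';  // prints 2147483648
      return 0;
    }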
---
 src/operator/quantization/mkldnn/mkldnn_quantized_elemwise_add.cc | 6 ++----
 src/operator/quantization/quantized_elemwise_mul.cc               | 5 ++---
 2 files changed, 4 insertions(+), 7 deletions(-)

diff --git a/src/operator/quantization/mkldnn/mkldnn_quantized_elemwise_add.cc b/src/operator/quantization/mkldnn/mkldnn_quantized_elemwise_add.cc
index 10f21bc..6a6c773 100644
--- a/src/operator/quantization/mkldnn/mkldnn_quantized_elemwise_add.cc
+++ b/src/operator/quantization/mkldnn/mkldnn_quantized_elemwise_add.cc
@@ -125,7 +125,8 @@ static void MKLDNNQuantizedElemwiseAddForward(const nnvm::NodeAttrs& attrs,
   mkldnn::memory* rescaled_mem;
 
   // output default set as int32
-  float output_data_range = kInt32Range;
+  // The impact of rounding in the line below is negligible.
+  float output_data_range = static_cast<float>(kInt32Range);
   auto output_data_type   = mkldnn::memory::data_type::s32;
   // dataA && dataB are uint8
   if (out_data[quantized_elemwise_add_enum::kOut].dtype() == mshadow::kInt8) {
@@ -134,9 +135,6 @@ static void MKLDNNQuantizedElemwiseAddForward(const nnvm::NodeAttrs& attrs,
  } else if (out_data[quantized_elemwise_add_enum::kOut].dtype() == mshadow::kUint8) {
     output_data_range = kUint8Range;
     output_data_type  = mkldnn::memory::data_type::u8;
-  } else {
-    output_data_range = kInt32Range;
-    output_data_type  = mkldnn::memory::data_type::s32;
   }
 
   float output_min     = 0;
diff --git a/src/operator/quantization/quantized_elemwise_mul.cc b/src/operator/quantization/quantized_elemwise_mul.cc
index fb0df3c..5e6a403 100644
--- a/src/operator/quantization/quantized_elemwise_mul.cc
+++ b/src/operator/quantization/quantized_elemwise_mul.cc
@@ -139,12 +139,11 @@ void QuantizedElemwiseMulOpForward(const nnvm::NodeAttrs &attrs,
   float out_data_scale = 1.f;
   float out_scale = 1.f;
   if (!params.enable_float_output) {
-    float output_data_range = kInt32Range;
+    // The impact of rounding in the line below is negligible.
+    float output_data_range = static_cast<float>(kInt32Range);
     // dataA && dataB are int8
     if (outputs[quantized_elemwise_mul::kOut].type_flag_ == mshadow::kInt8) {
       output_data_range = kInt8Range;
-    } else {
-      output_data_range = kInt32Range;
     }
    if (params.max_calib_range.has_value() && params.min_calib_range.has_value()) {
       cached_output_min_ = params.min_calib_range.value();
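
As a back-of-the-envelope check on the "negligible" rounding the new comments refer to: using 2^31 instead of 2^31 - 1 as the output data range changes it by about one part in 2.1 billion. A small sketch (plain C++, not part of the patch):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const double exact   = 2147483647.0;                   // INT32_MAX
      const double rounded = static_cast<float>(INT32_MAX);  // rounds to 2^31
      // Relative error introduced by the int-to-float conversion.
      std::printf("relative error: %.3e\n", (rounded - exact) / exact);
      // prints: relative error: 4.657e-10
      return 0;
    }

Any scale factor derived from this range therefore shifts by far less than one quantization step of the int8/uint8 inputs, which is why the comments added in both files call the impact negligible.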
