This is an automated email from the ASF dual-hosted git repository.

bgawrych pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new 183e012f01 Get rid of warnings (#21099)
183e012f01 is described below

commit 183e012f012fbf8c3799094a3365686bd19f50e3
Author: hankaj <[email protected]>
AuthorDate: Tue Jul 19 17:00:19 2022 +0200

    Get rid of warnings (#21099)
    
    * Get rid of warnings
    
    * Fix clang formatting
    
    * Fix problem with redefinition
    
    * Fix 'uninitialized' warning
    
    * Update CONTRIBUTORS.md
---
 CONTRIBUTORS.md                                      |  1 +
 src/operator/c_lapack_api.h                          |  8 ++++++++
 src/operator/quantization/dnnl/dnnl_quantized_rnn.cc |  2 --
 src/operator/tensor/broadcast_reduce-inl.h           |  2 +-
 src/operator/tensor/index_update-inl.h               |  1 -
 tests/cpp/engine/threaded_engine_test.cc             |  2 --
 tests/cpp/include/test_util.h                        | 10 +++++-----
 7 files changed, 15 insertions(+), 11 deletions(-)

diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md
index 716f146b78..6fe0e33971 100644
--- a/CONTRIBUTORS.md
+++ b/CONTRIBUTORS.md
@@ -302,6 +302,7 @@ List of Contributors
 * [Dominika Jedynak](https://github.com/DominikaJedynak)
 * [Adam Grabowski](https://github.com/agrabows)
 * [Kacper Pietkun](https://github.com/Kacper-Pietkun)
+* [Hanna Jarlaczyńska](https://github.com/hankaj)
 
 Label Bot
 ---------
diff --git a/src/operator/c_lapack_api.h b/src/operator/c_lapack_api.h
index ee750013d8..aa1fea8785 100644
--- a/src/operator/c_lapack_api.h
+++ b/src/operator/c_lapack_api.h
@@ -1045,6 +1045,14 @@ MXNET_LAPACK_CWRAPPER12(dorgqr, double)
 #undef MXNET_LAPACK_CWRAPPER2
 #undef MXNET_LAPACK_CWRAPPER3
 #undef MXNET_LAPACK_CWRAPPER4
+#undef MXNET_LAPACK_CWRAPPER5
+#undef MXNET_LAPACK_CWRAPPER6
+#undef MXNET_LAPACK_CWRAPPER7
+#undef MXNET_LAPACK_CWRAPPER8
+#undef MXNET_LAPACK_CWRAPPER9
+#undef MXNET_LAPACK_CWRAPPER10
+#undef MXNET_LAPACK_CWRAPPER11
+#undef MXNET_LAPACK_CWRAPPER12
 #undef MXNET_LAPACK_UNAVAILABLE
 #endif
 
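Note on the c_lapack_api.h hunk: the header builds its LAPACK wrappers from a family of MXNET_LAPACK_CWRAPPERn macros, but previously only the first four were #undef'd after use, so the rest leaked past the header and could collide with later definitions (the "problem with redefinition" in the commit message). A minimal sketch of the define/use/undef pattern, with the real wrapper body elided and the signature approximated:

    // sketch only; the real macro declares the full wrapper signature
    #define MXNET_LAPACK_CWRAPPER12(func, dtype) \
      int MXNET_LAPACK_##func(int matrix_layout, int m, int n, int k, \
                              dtype* a, int lda, dtype* tau);

    MXNET_LAPACK_CWRAPPER12(sorgqr, float)
    MXNET_LAPACK_CWRAPPER12(dorgqr, double)

    #undef MXNET_LAPACK_CWRAPPER12  // scope the helper macro to this header
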
diff --git a/src/operator/quantization/dnnl/dnnl_quantized_rnn.cc b/src/operator/quantization/dnnl/dnnl_quantized_rnn.cc
index 73393d9b4c..6902f898a6 100644
--- a/src/operator/quantization/dnnl/dnnl_quantized_rnn.cc
+++ b/src/operator/quantization/dnnl/dnnl_quantized_rnn.cc
@@ -279,8 +279,6 @@ void DNNLQuantizedRnnOp::Forward(const OpContext& op_ctx,
   char* dst_state         = nullptr;  // Output state
   char* src_state_cell    = nullptr;  // Used in LSTM for cell state
   char* dst_state_cell    = nullptr;  // Used in LSTM for cell state
-  const size_t cell_bytes = (default_param.bidirectional + 1) * default_param.batch_size_ *
-                            default_param.state_size * mshadow::mshadow_sizeof(data_dtype);
 
   if (default_param.state_outputs && req[rnn_enum::kStateOut] != kNullOp) {
     stateout_mem =
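The dnnl_quantized_rnn.cc change deletes a cell_bytes computation whose result was never read, which -Wall builds flag as dead code. A minimal reproduction of the diagnostic being silenced (values hypothetical):

    #include <cstddef>

    void forward() {
      const std::size_t cell_bytes = 2 * 64 * 512;  // computed but never read
      // g++/clang++ -Wall: warning: unused variable 'cell_bytes' [-Wunused-variable]
    }

Because the right-hand side has no side effects, removing the whole statement is cleaner than the (void)cell_bytes; suppression idiom.
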
diff --git a/src/operator/tensor/broadcast_reduce-inl.h b/src/operator/tensor/broadcast_reduce-inl.h
index 77a81bcb64..f9834d05b6 100644
--- a/src/operator/tensor/broadcast_reduce-inl.h
+++ b/src/operator/tensor/broadcast_reduce-inl.h
@@ -327,7 +327,7 @@ MSHADOW_XINLINE std::pair<AType, AType> seq_reduce_assign_block(size_t start,
                                                                 const Shape<ndim>& rshape,
                                                                 const Shape<ndim>& rstride) {
   Shape<ndim> coord;
-  AType val, residual;
+  AType val, residual{};
   Reducer::SetInitValue(val, residual);
   for (size_t k = start; k < start + len; ++k) {
     coord      = mxnet_op::unravel(k, rshape);
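In broadcast_reduce-inl.h, residual{} value-initializes the accumulator before Reducer::SetInitValue runs; if a reducer's SetInitValue overload leaves residual untouched, GCC's -Wmaybe-uninitialized can fire on the later read, and the empty braces guarantee a defined starting value either way. The general idiom:

    int a;    // indeterminate value; reading it before assignment is UB
    int b{};  // value-initialized to 0; always safe to read

This is the commit's "Fix 'uninitialized' warning" bullet.
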
diff --git a/src/operator/tensor/index_update-inl.h b/src/operator/tensor/index_update-inl.h
index 8d43f3e327..83da9543c2 100644
--- a/src/operator/tensor/index_update-inl.h
+++ b/src/operator/tensor/index_update-inl.h
@@ -254,7 +254,6 @@ inline void IndexUpdateOpBackwardA(const nnvm::NodeAttrs& attrs,
   TBlob ograd             = inputs[0];
   TBlob ind               = inputs[1];
   const TBlob& grad_a     = outputs[0];
-  mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
   // get the number of 'ind' index
   if (ind.shape_.ndim() == 0) {
     ind.shape_ = Shape2(1, 1);
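The index_update-inl.h hunk drops a stream handle that was fetched but never used. Had the value still been wanted occasionally, two standard suppression idioms would apply (shown as hypothetical alternatives, not what the patch does):

    (void)ctx.get_stream<xpu>();                       // discard result explicitly
    [[maybe_unused]] auto* s = ctx.get_stream<xpu>();  // C++17 attribute

Since nothing in the function appears to rely on a side effect of get_stream, deleting the line outright is the right fix.
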
diff --git a/tests/cpp/engine/threaded_engine_test.cc b/tests/cpp/engine/threaded_engine_test.cc
index 61509e888e..c474404337 100644
--- a/tests/cpp/engine/threaded_engine_test.cc
+++ b/tests/cpp/engine/threaded_engine_test.cc
@@ -51,8 +51,6 @@ struct Workload {
   int time;
 };
 
-static uint32_t seed_ = 0xdeadbeef;
-
 /**
  * generate a list of workloads
  */
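The threaded_engine_test.cc change removes a file-scope static that no test referenced, which GCC reports as "'seed_' defined but not used" under -Wall. If it were still wanted for ad-hoc debugging, a hedged C++17 alternative would be:

    [[maybe_unused]] static uint32_t seed_ = 0xdeadbeef;  // kept but exempt from the warning

Removal is the better choice here, since no code path touched it.
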
diff --git a/tests/cpp/include/test_util.h b/tests/cpp/include/test_util.h
index 48e3971a88..2cb590230e 100644
--- a/tests/cpp/include/test_util.h
+++ b/tests/cpp/include/test_util.h
@@ -787,11 +787,11 @@ struct ScopeSet {
   T saveValue_;
 };
 
-static void AssertEqual(const std::vector<NDArray*>& in_arrs,
-                        const std::vector<NDArray*>& out_arrs,
-                        float rtol           = 1e-5,
-                        float atol           = 1e-8,
-                        bool test_first_only = false) {
+static inline void AssertEqual(const std::vector<NDArray*>& in_arrs,
+                               const std::vector<NDArray*>& out_arrs,
+                               float rtol           = 1e-5,
+                               float atol           = 1e-8,
+                               bool test_first_only = false) {
   for (size_t j = 0; j < in_arrs.size(); ++j) {
     // When test_all is fir
     if (test_first_only && j == 1) {

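Finally, the test_util.h change: a plain static function defined in a header gets a separate internal-linkage copy in every translation unit that includes it, and any TU that never calls it emits -Wunused-function. GCC and Clang deliberately skip that warning for inline functions, so static inline keeps the helper header-friendly without diagnostics. A minimal sketch (names hypothetical):

    // util.h (sketch)
    static void helper_a() {}         // -Wunused-function in TUs that ignore it
    static inline void helper_b() {}  // no warning; linkage is still internal

The signature and default arguments of AssertEqual are otherwise unchanged; only the inline specifier is new.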