marcoabreu closed pull request #12361: [MXNET-860] - Avoid implicit double
conversions in math operations.
URL: https://github.com/apache/incubator-mxnet/pull/12361
This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:
As this is a foreign pull request (from a fork), the diff is supplied
below (as it won't show otherwise due to GitHub magic):
diff --git a/src/io/image_det_aug_default.cc b/src/io/image_det_aug_default.cc
index 79e19318366..15169d8f760 100644
--- a/src/io/image_det_aug_default.cc
+++ b/src/io/image_det_aug_default.cc
@@ -560,7 +560,7 @@ class DefaultImageDetAugmenter : public ImageAugmenter {
}
cv::cvtColor(res, res, CV_HLS2BGR);
}
- if (fabs(c) > 1e-3) {
+ if (std::fabs(c) > 1e-3) {
cv::Mat tmp = res;
tmp.convertTo(res, -1, c + 1.f, 0);
}
diff --git a/src/operator/contrib/adaptive_avg_pooling.cc
b/src/operator/contrib/adaptive_avg_pooling.cc
index 00ab36605bf..a65f5fe8d43 100644
--- a/src/operator/contrib/adaptive_avg_pooling.cc
+++ b/src/operator/contrib/adaptive_avg_pooling.cc
@@ -26,8 +26,8 @@
// #include "elemwise_op_common.h"
#include "../elemwise_op_common.h"
-#define START_IND(a, b, c) static_cast<int>(floor(static_cast<float>(a * c) / b))
-#define END_IND(a, b, c) static_cast<int>(ceil(static_cast<float>((a + 1) * c) / b))
+#define START_IND(a, b, c) static_cast<int>(std::floor(static_cast<float>(a * c) / b))
+#define END_IND(a, b, c) static_cast<int>(std::ceil(static_cast<float>((a + 1) * c) / b))
namespace mxnet {
namespace op {
diff --git a/src/operator/contrib/multi_proposal.cc
b/src/operator/contrib/multi_proposal.cc
index 3f5796e844a..e77a0b5aeba 100644
--- a/src/operator/contrib/multi_proposal.cc
+++ b/src/operator/contrib/multi_proposal.cc
@@ -67,8 +67,8 @@ inline void BBoxTransformInv(const mshadow::Tensor<cpu, 2>&
boxes,
float pred_ctr_x = dx * width + ctr_x;
float pred_ctr_y = dy * height + ctr_y;
- float pred_w = exp(dw) * width;
- float pred_h = exp(dh) * height;
+ float pred_w = std::exp(dw) * width;
+ float pred_h = std::exp(dh) * height;
float pred_x1 = pred_ctr_x - 0.5 * (pred_w - 1.0);
float pred_y1 = pred_ctr_y - 0.5 * (pred_h - 1.0);
diff --git a/src/operator/contrib/multibox_detection.cc
b/src/operator/contrib/multibox_detection.cc
index e5a7dd8fb63..f92460e9e5e 100644
--- a/src/operator/contrib/multibox_detection.cc
+++ b/src/operator/contrib/multibox_detection.cc
@@ -62,8 +62,8 @@ inline void TransformLocations(DType *out, const DType
*anchors,
DType ph = loc_pred[3];
DType ox = px * vx * aw + ax;
DType oy = py * vy * ah + ay;
- DType ow = exp(pw * vw) * aw / 2;
- DType oh = exp(ph * vh) * ah / 2;
+ DType ow = std::exp(pw * vw) * aw / 2;
+ DType oh = std::exp(ph * vh) * ah / 2;
out[0] = clip ? std::max(DType(0), std::min(DType(1), ox - ow)) : (ox - ow);
out[1] = clip ? std::max(DType(0), std::min(DType(1), oy - oh)) : (oy - oh);
out[2] = clip ? std::max(DType(0), std::min(DType(1), ox + ow)) : (ox + ow);
diff --git a/src/operator/contrib/proposal.cc b/src/operator/contrib/proposal.cc
index fa713bba825..935372d34db 100644
--- a/src/operator/contrib/proposal.cc
+++ b/src/operator/contrib/proposal.cc
@@ -63,8 +63,8 @@ inline void BBoxTransformInv(const mshadow::Tensor<cpu, 2>&
boxes,
float pred_ctr_x = dx * width + ctr_x;
float pred_ctr_y = dy * height + ctr_y;
- float pred_w = exp(dw) * width;
- float pred_h = exp(dh) * height;
+ float pred_w = std::exp(dw) * width;
+ float pred_h = std::exp(dh) * height;
float pred_x1 = pred_ctr_x - 0.5 * (pred_w - 1.0);
float pred_y1 = pred_ctr_y - 0.5 * (pred_h - 1.0);
diff --git a/src/operator/contrib/roi_align.cc
b/src/operator/contrib/roi_align.cc
index 22611273cf5..65ee626a17c 100644
--- a/src/operator/contrib/roi_align.cc
+++ b/src/operator/contrib/roi_align.cc
@@ -182,9 +182,9 @@ void ROIAlignForward(
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
- : ceil(roi_height / pooled_height); // e.g., = 2
+ : std::ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
- (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
+ (sampling_ratio > 0) ? sampling_ratio : std::ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
@@ -357,9 +357,9 @@ void ROIAlignBackward(
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
- : ceil(roi_height / pooled_height); // e.g., = 2
+ : std::ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
- (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
+ (sampling_ratio > 0) ? sampling_ratio : std::ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
diff --git a/src/operator/correlation-inl.h b/src/operator/correlation-inl.h
index 7266a0a9184..9dca44e5512 100644
--- a/src/operator/correlation-inl.h
+++ b/src/operator/correlation-inl.h
@@ -98,9 +98,9 @@ class CorrelationOp : public Operator {
border_size_ = param_.max_displacement + kernel_radius_;
stride1 = param_.stride1;
stride2 = param_.stride2;
- top_width_ = ceil(static_cast<float>(paddedbottomwidth - border_size_ * 2)\
+ top_width_ = std::ceil(static_cast<float>(paddedbottomwidth - border_size_ * 2)\
/ static_cast<float>(stride1));
- top_height_ = ceil(static_cast<float>(paddedbottomheight - border_size_ * 2)\
+ top_height_ = std::ceil(static_cast<float>(paddedbottomheight - border_size_ * 2)\
/ static_cast<float>(stride1));
neighborhood_grid_radius_ = param_.max_displacement / stride2;
neighborhood_grid_width_ = neighborhood_grid_radius_ * 2 + 1;
@@ -211,9 +211,9 @@ void Init(const std::vector<std::pair<std::string,
std::string> >& kwargs) overr
border_size_ = param_.max_displacement + kernel_radius_;
stride1 = param_.stride1;
stride2 = param_.stride2;
- top_width_ = ceil(static_cast<float>(paddedbottomwidth - border_size_ * 2)\
+ top_width_ = std::ceil(static_cast<float>(paddedbottomwidth - border_size_ * 2)\
/ static_cast<float>(stride1));
- top_height_ = ceil(static_cast<float>(paddedbottomheight - border_size_ * 2)\
+ top_height_ = std::ceil(static_cast<float>(paddedbottomheight - border_size_ * 2)\
/ static_cast<float>(stride1));
neighborhood_grid_radius_ = param_.max_displacement / stride2;
neighborhood_grid_width_ = neighborhood_grid_radius_ * 2 + 1;
diff --git a/src/operator/image/image_random-inl.h
b/src/operator/image/image_random-inl.h
index 47beca1d506..c64ed28ecc2 100644
--- a/src/operator/image/image_random-inl.h
+++ b/src/operator/image/image_random-inl.h
@@ -418,10 +418,10 @@ void RGB2HLSConvert(const float& src_r,
float diff;
vmax = vmin = r;
- vmax = fmax(vmax, g);
- vmax = fmax(vmax, b);
- vmin = fmin(vmin, g);
- vmin = fmin(vmin, b);
+ vmax = std::fmax(vmax, g);
+ vmax = std::fmax(vmax, b);
+ vmin = std::fmin(vmin, g);
+ vmin = std::fmin(vmin, b);
diff = vmax - vmin;
l = (vmax + vmin) * 0.5f;
diff --git a/src/operator/nn/batch_norm.cc b/src/operator/nn/batch_norm.cc
index 4ea494d64e4..f28f5d7a436 100644
--- a/src/operator/nn/batch_norm.cc
+++ b/src/operator/nn/batch_norm.cc
@@ -33,7 +33,7 @@
#endif
/*! \brief inverse standard deviation <-> variance */
-#define VARIANCE_TO_INVSTD(__var$, __eps$) (1.0/sqrt((__var$) + DType(__eps$)))
+#define VARIANCE_TO_INVSTD(__var$, __eps$) (1.0/std::sqrt((__var$) + DType(__eps$)))
#define INVSTD_TO_VARIANCE(__invstd$, __eps$) ((1.0 / ((__invstd$) * (__invstd$))) - (__eps$))
namespace mxnet {
diff --git a/src/operator/nn/lrn-inl.h b/src/operator/nn/lrn-inl.h
index 63044959812..7bd91448533 100644
--- a/src/operator/nn/lrn-inl.h
+++ b/src/operator/nn/lrn-inl.h
@@ -61,9 +61,9 @@ struct LRNParam : public dmlc::Parameter<LRNParam> {
bool operator==(const LRNParam& other) const {
return (this->nsize == other.nsize &&
- fabs(this->alpha - other.alpha) < 1e-6 &&
- fabs(this->beta - other.beta) < 1e-6 &&
- fabs(this->knorm - other.knorm) < 1e-6);
+ std::fabs(this->alpha - other.alpha) < 1e-6 &&
+ std::fabs(this->beta - other.beta) < 1e-6 &&
+ std::fabs(this->knorm - other.knorm) < 1e-6);
}
}; // struct LRNParam
diff --git a/src/operator/nn/mkldnn/mkldnn_batch_norm-inl.h
b/src/operator/nn/mkldnn/mkldnn_batch_norm-inl.h
index 496ff99f4ee..e605c9bb19c 100644
--- a/src/operator/nn/mkldnn/mkldnn_batch_norm-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_batch_norm-inl.h
@@ -34,7 +34,7 @@
#include "./mkldnn_ops-inl.h"
#include "./mkldnn_base-inl.h"
-#define VARIANCE_TO_INVSTD(__var$, __eps$) (1.0/sqrt((__var$) + DType(__eps$)))
+#define VARIANCE_TO_INVSTD(__var$, __eps$) (1.0/std::sqrt((__var$) + DType(__eps$)))
#define INVSTD_TO_VARIANCE(__invstd$, __eps$) ((1.0 / ((__invstd$) * (__invstd$))) - (__eps$))
namespace mxnet {
namespace op {
diff --git a/src/operator/nn/pooling.cc b/src/operator/nn/pooling.cc
index c133b63623a..558722edb20 100644
--- a/src/operator/nn/pooling.cc
+++ b/src/operator/nn/pooling.cc
@@ -127,7 +127,7 @@ static bool PoolingShape(const nnvm::NodeAttrs &attrs,
(dshape[2] + 2 * param.pad[0] - param.kernel[0]) /
param.stride[0];
} else {
- oshape[2] = 1 + static_cast<int>(ceil(
+ oshape[2] = 1 + static_cast<int>(std::ceil(
static_cast<float>(dshape[2] + 2 * param.pad[0] -
param.kernel[0]) /
param.stride[0]));
@@ -157,11 +157,11 @@ static bool PoolingShape(const nnvm::NodeAttrs &attrs,
(dshape[3] + 2 * param.pad[1] - param.kernel[1]) /
param.stride[1];
} else {
- oshape[2] = 1 + static_cast<int>(ceil(
+ oshape[2] = 1 + static_cast<int>(std::ceil(
static_cast<float>(dshape[2] + 2 * param.pad[0] -
param.kernel[0]) /
param.stride[0]));
- oshape[3] = 1 + static_cast<int>(ceil(
+ oshape[3] = 1 + static_cast<int>(std::ceil(
static_cast<float>(dshape[3] + 2 * param.pad[1] -
param.kernel[1]) /
param.stride[1]));
@@ -192,15 +192,15 @@ static bool PoolingShape(const nnvm::NodeAttrs &attrs,
(dshape[4] + 2 * param.pad[2] - param.kernel[2]) /
param.stride[2];
} else {
- oshape[2] = 1 + static_cast<int>(ceil(
+ oshape[2] = 1 + static_cast<int>(std::ceil(
static_cast<float>(dshape[2] + 2 * param.pad[0] -
param.kernel[0]) /
param.stride[0]));
- oshape[3] = 1 + static_cast<int>(ceil(
+ oshape[3] = 1 + static_cast<int>(std::ceil(
static_cast<float>(dshape[3] + 2 * param.pad[1] -
param.kernel[1]) /
param.stride[1]));
- oshape[4] = 1 + static_cast<int>(ceil(
+ oshape[4] = 1 + static_cast<int>(std::ceil(
static_cast<float>(dshape[4] + 2 * param.pad[2] -
param.kernel[2]) /
param.stride[2]));
diff --git a/src/operator/pooling_v1-inl.h b/src/operator/pooling_v1-inl.h
index 0a663265cbe..8942ddc0d71 100644
--- a/src/operator/pooling_v1-inl.h
+++ b/src/operator/pooling_v1-inl.h
@@ -273,10 +273,10 @@ class PoolingV1Prop : public OperatorProperty {
oshape[3] = 1 + (dshape[3] + 2 * param_.pad[1] - param_.kernel[1]) /
param_.stride[1];
} else {
- oshape[2] = 1 + static_cast<int>(ceil(static_cast<float>(
+ oshape[2] = 1 + static_cast<int>(std::ceil(static_cast<float>(
dshape[2] + 2 * param_.pad[0] -
param_.kernel[0]) / param_.stride[0]));
- oshape[3] = 1 + static_cast<int>(ceil(static_cast<float>(
+ oshape[3] = 1 + static_cast<int>(std::ceil(static_cast<float>(
dshape[3] + 2 * param_.pad[1] -
param_.kernel[1]) / param_.stride[1]));
}
@@ -296,13 +296,13 @@ class PoolingV1Prop : public OperatorProperty {
oshape[4] = 1 + (dshape[4] + 2 * param_.pad[2] - param_.kernel[2]) /
param_.stride[2];
} else {
- oshape[2] = 1 + static_cast<int>(ceil(static_cast<float>(
+ oshape[2] = 1 + static_cast<int>(std::ceil(static_cast<float>(
dshape[2] + 2 * param_.pad[0] -
param_.kernel[0]) / param_.stride[0]));
- oshape[3] = 1 + static_cast<int>(ceil(static_cast<float>(
+ oshape[3] = 1 + static_cast<int>(std::ceil(static_cast<float>(
dshape[3] + 2 * param_.pad[1] -
param_.kernel[1]) / param_.stride[1]));
- oshape[4] = 1 + static_cast<int>(ceil(static_cast<float>(
+ oshape[4] = 1 + static_cast<int>(std::ceil(static_cast<float>(
dshape[4] + 2 * param_.pad[2] -
param_.kernel[2]) / param_.stride[2]));
}
diff --git a/src/operator/roi_pooling.cc b/src/operator/roi_pooling.cc
index acff1f97dcc..124d811c46a 100644
--- a/src/operator/roi_pooling.cc
+++ b/src/operator/roi_pooling.cc
@@ -66,10 +66,10 @@ inline void ROIPoolForward(const Tensor<cpu, 4, Dtype> &out,
Dtype *top_data_n = top_data + n * out_size;
Dtype *argmax_data_n = argmax_data + n * max_idx_size;
int roi_batch_ind = bottom_rois_n[0];
- int roi_start_w = round(bottom_rois_n[1] * spatial_scale_);
- int roi_start_h = round(bottom_rois_n[2] * spatial_scale_);
- int roi_end_w = round(bottom_rois_n[3] * spatial_scale_);
- int roi_end_h = round(bottom_rois_n[4] * spatial_scale_);
+ int roi_start_w = std::round(bottom_rois_n[1] * spatial_scale_);
+ int roi_start_h = std::round(bottom_rois_n[2] * spatial_scale_);
+ int roi_end_w = std::round(bottom_rois_n[3] * spatial_scale_);
+ int roi_end_h = std::round(bottom_rois_n[4] * spatial_scale_);
assert(roi_batch_ind >= 0);
assert(static_cast<index_t>(roi_batch_ind) < data.size(0) /* batch size
*/);
@@ -171,10 +171,10 @@ inline void ROIPoolBackwardAcc(const Tensor<cpu, 4,
Dtype> &in_grad,
continue;
}
- int roi_start_w = round(offset_bottom_rois[1] * spatial_scale_);
- int roi_start_h = round(offset_bottom_rois[2] * spatial_scale_);
- int roi_end_w = round(offset_bottom_rois[3] * spatial_scale_);
- int roi_end_h = round(offset_bottom_rois[4] * spatial_scale_);
+ int roi_start_w = std::round(offset_bottom_rois[1] * spatial_scale_);
+ int roi_start_h = std::round(offset_bottom_rois[2] * spatial_scale_);
+ int roi_end_w = std::round(offset_bottom_rois[3] * spatial_scale_);
+ int roi_end_h = std::round(offset_bottom_rois[4] * spatial_scale_);
bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
diff --git a/src/operator/spatial_transformer.cc
b/src/operator/spatial_transformer.cc
index 13937290d90..8c6779df1b7 100644
--- a/src/operator/spatial_transformer.cc
+++ b/src/operator/spatial_transformer.cc
@@ -51,8 +51,8 @@ inline void BilinearSamplingForward(const Tensor<cpu, 4,
DType> &output,
const index_t grid_index = n * o_h * o_w * 2 + h * o_w + w;
const DType y_real = (*(grid + grid_index + o_h * o_w) + 1) * (i_h -
1) / 2;
const DType x_real = (*(grid + grid_index) + 1) * (i_w - 1) / 2;
- const auto top_left_y = static_cast<int>(floor(y_real));
- const auto top_left_x = static_cast<int>(floor(x_real));
+ const auto top_left_y = static_cast<int>(std::floor(y_real));
+ const auto top_left_x = static_cast<int>(std::floor(x_real));
const DType top_left_y_w = 1.0 - (y_real - top_left_y);
const DType top_left_x_w = 1.0 - (x_real - top_left_x);
const int data_index = n * i_c * i_h * i_w + c * i_h * i_w +
@@ -99,8 +99,8 @@ inline void BilinearSamplingBackward(const Tensor<cpu, 4,
DType> &input_grad,
const index_t grid_src_index = n * o_h * o_w * 2 + h * o_w + w;
const DType y_real = (*(grid_src + grid_src_index + o_h * o_w) + 1)
* (i_h - 1) / 2;
const DType x_real = (*(grid_src + grid_src_index) + 1) * (i_w - 1)
/ 2;
- const auto top_left_y = static_cast<int>(floor(y_real));
- const auto top_left_x = static_cast<int>(floor(x_real));
+ const auto top_left_y = static_cast<int>(std::floor(y_real));
+ const auto top_left_x = static_cast<int>(std::floor(x_real));
const DType top_left_y_w = 1.0 - (y_real - top_left_y);
const DType top_left_x_w = 1.0 - (x_real - top_left_x);
for (index_t c = 0; c < static_cast<index_t>(o_c); ++c) {
diff --git a/src/operator/tensor/elemwise_binary_op-inl.h
b/src/operator/tensor/elemwise_binary_op-inl.h
index 1b1a1d20077..72a02ff5fd8 100644
--- a/src/operator/tensor/elemwise_binary_op-inl.h
+++ b/src/operator/tensor/elemwise_binary_op-inl.h
@@ -68,13 +68,13 @@ void ElemwiseBinaryOp::RspRspOp(mshadow::Stream<cpu> *s,
if (rhs_is_dense) {
// For right-side dense, in order to have sparse output, lhs input
zero should
// always output zero
- CHECK(fabs(static_cast<float>(OP::Map(DType(0), DType(99)))) < 1e-4f);
+ CHECK(std::fabs(static_cast<float>(OP::Map(DType(0), DType(99)))) < 1e-4f);
CHECK(!is_dense_result); // Currently not handled
}
if (lhs_is_dense) {
// For left-side dense, in order to have sparse output, lhs input zero
should
// always output zero
- CHECK(fabs(static_cast<float>(OP::Map(DType(99), DType(0)))) < 1e-4f);
+ CHECK(std::fabs(static_cast<float>(OP::Map(DType(99), DType(0)))) < 1e-4f);
CHECK(!is_dense_result); // Currently not handled
}
@@ -102,10 +102,10 @@ void ElemwiseBinaryOp::RspRspOp(mshadow::Stream<cpu> *s,
CHECK_EQ(is_dense_result, false);
if (lhs_in_place) {
// For in-place, zero L-value must always be zero output
- DCHECK(fabs(static_cast<float>(OP::Map(DType(0), DType(99)))) < DType(1e-3));
+ DCHECK(std::fabs(static_cast<float>(OP::Map(DType(0), DType(99)))) < DType(1e-3));
} else {
// For in-place, zero R-value must always be zero output
- DCHECK(fabs(static_cast<float>(OP::Map(DType(99), DType(0)))) < DType(1e-3));
+ DCHECK(std::fabs(static_cast<float>(OP::Map(DType(99), DType(0)))) < DType(1e-3));
}
}
}
diff --git a/src/operator/tensor/elemwise_binary_op.h
b/src/operator/tensor/elemwise_binary_op.h
index cb1db0ec632..9b451fa6935 100644
--- a/src/operator/tensor/elemwise_binary_op.h
+++ b/src/operator/tensor/elemwise_binary_op.h
@@ -646,7 +646,7 @@ class ElemwiseBinaryOp : public OpBase {
if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype
== kCSRStorage)) {
CHECK_EQ(outputs[0].storage_type(), in_stype);
// rsp -> rsp, _. op requires 0-input returns 0-output
- DCHECK_LT(fabs(static_cast<float>(LOP::Map(0))), 1e-5f);
+ DCHECK_LT(std::fabs(static_cast<float>(LOP::Map(0))), 1e-5f);
UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]});
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
@@ -657,7 +657,7 @@ class ElemwiseBinaryOp : public OpBase {
if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype
== kCSRStorage)) {
CHECK_EQ(outputs[0].storage_type(), in_stype);
// rsp -> _, rsp. op requires 0-input returns 0-output
- DCHECK_LT(fabs(static_cast<float>(ROP::Map(0))), 1e-5f);
+ DCHECK_LT(std::fabs(static_cast<float>(ROP::Map(0))), 1e-5f);
UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]});
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services