This is an automated email from the ASF dual-hosted git repository.
cjolivier01 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git
The following commit(s) were added to refs/heads/master by this push:
new 46cf856 Fix unit test build (#9371)
46cf856 is described below
commit 46cf8567e13cb939bf7d026e85800a43e79c9beb
Author: Chris Olivier <[email protected]>
AuthorDate: Wed Jan 10 14:25:01 2018 -0800
Fix unit test build (#9371)
* Fix unit test build
* lint
---
tests/cpp/misc/memory_test.cc | 34 ++---
tests/cpp/operator/batchnorm_test.cc | 202 +++++++++++++-------------
tests/cpp/operator/tune/operator_tune_test.cc | 3 +-
3 files changed, 122 insertions(+), 117 deletions(-)
diff --git a/tests/cpp/misc/memory_test.cc b/tests/cpp/misc/memory_test.cc
index 8f4e8c2..7c83e46 100644
--- a/tests/cpp/misc/memory_test.cc
+++ b/tests/cpp/misc/memory_test.cc
@@ -64,40 +64,40 @@ TEST(MEMORY_TEST, MemsetAndMemcopyPerformance) {
std::cout << "====================================" << std::endl
<< "Data size: " << test::pretty_num(test_size) << std::endl <<
std::flush;
- std::unique_ptr<uint8_t> buffer_1(new uint8_t[test_size]), buffer_2(new
uint8_t[test_size]);
- uint8_t *src = buffer_1.get(), *dest = buffer_2.get();
+ std::unique_ptr<float> buffer_1(new float[test_size]), buffer_2(new
float[test_size]);
+ float *src = buffer_1.get(), *dest = buffer_2.get();
for (size_t x = 0; x < 5; ++x) {
// Init memory with different values
- memset(src, 3, test_size);
- memset(dest, 255, test_size); // wipe out some/all of src cache
+ memset(src, 3, test_size * sizeof(float));
+ memset(dest, 255, test_size * sizeof(float)); // wipe out some/all of
src cache
// memset
- uint64_t start = test::perf::getNannoTickCount();
- memset(src, 123, test_size);
- const uint64_t memset_time = test::perf::getNannoTickCount() - start;
+ uint64_t start = mxnet::test::perf::getNannoTickCount();
+ memset(src, 0, test_size * sizeof(float));
+ const uint64_t memset_time = mxnet::test::perf::getNannoTickCount() -
start;
- start = test::perf::getNannoTickCount();
+ start = mxnet::test::perf::getNannoTickCount();
#pragma omp parallel for num_threads(GetOMPThreadCount())
for (int i = 0; i < static_cast<int>(test_size); ++i) {
- src[i] = 42;
+ src[i] = 42.0f;
}
- const uint64_t omp_set_time = test::perf::getNannoTickCount() - start;
+ const uint64_t omp_set_time = mxnet::test::perf::getNannoTickCount() -
start;
- start = test::perf::getNannoTickCount();
- memcpy(dest, src, test_size);
- const uint64_t memcpy_time = test::perf::getNannoTickCount() - start;
+ start = mxnet::test::perf::getNannoTickCount();
+ memcpy(dest, src, test_size * sizeof(float));
+ const uint64_t memcpy_time = mxnet::test::perf::getNannoTickCount() -
start;
// bounce the cache and dirty logic
- memset(src, 6, test_size);
- memset(dest, 200, test_size);
+ memset(src, 6, test_size * sizeof(float));
+ memset(dest, 200, test_size * sizeof(float));
- start = test::perf::getNannoTickCount();
+ start = mxnet::test::perf::getNannoTickCount();
#pragma omp parallel for num_threads(GetOMPThreadCount())
for (int i = 0; i < static_cast<int>(test_size); ++i) {
dest[i] = src[i];
}
- const uint64_t omp_copy_time = test::perf::getNannoTickCount() - start;
+ const uint64_t omp_copy_time = mxnet::test::perf::getNannoTickCount() -
start;
memset_times.push_back(memset_time);
omp_set_times.push_back(omp_set_time);
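The substantive change in this hunk is the size arithmetic, not just the mxnet::test::perf qualification: memset and memcpy take their size argument in bytes, so once the buffers change from uint8_t to float, every element count has to be scaled by sizeof(float). A minimal standalone C++ sketch of the pitfall (not MXNet code; it uses the std::unique_ptr<T[]> array form):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>

int main() {
  const size_t test_size = 1024;  // element count, not byte count

  // uint8_t buffers: element count == byte count, so passing test_size
  // directly happens to be correct.
  std::unique_ptr<uint8_t[]> bytes(new uint8_t[test_size]);
  memset(bytes.get(), 3, test_size);

  // float buffers: the counts differ by sizeof(float); without the scaling,
  // only the first quarter of the buffer would be written.
  std::unique_ptr<float[]> src(new float[test_size]);
  std::unique_ptr<float[]> dest(new float[test_size]);
  memset(src.get(), 0, test_size * sizeof(float));           // all-zero bits == 0.0f
  memcpy(dest.get(), src.get(), test_size * sizeof(float));  // bytes, not elements
  assert(dest[test_size - 1] == 0.0f);
  return 0;
}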
diff --git a/tests/cpp/operator/batchnorm_test.cc b/tests/cpp/operator/batchnorm_test.cc
index d9b426a..179e42a 100644
--- a/tests/cpp/operator/batchnorm_test.cc
+++ b/tests/cpp/operator/batchnorm_test.cc
@@ -392,17 +392,17 @@ class BatchNormValidator : public test::op::Validator<DType, AccReal> {
// Input
EXPECT_TRUE(compare(*info_1.executor_, *info_2.executor_,
test::op::LegacyOperatorExecutor<DType, AccReal>::kInput,
- op::batchnorm::kData));
+ mxnet::op::batchnorm::kData));
EXPECT_TRUE(compare(*info_1.executor_, *info_2.executor_,
test::op::LegacyOperatorExecutor<DType, AccReal>::kInput,
- op::batchnorm::kGamma));
+ mxnet::op::batchnorm::kGamma));
EXPECT_TRUE(compare(*info_1.executor_, *info_2.executor_,
test::op::LegacyOperatorExecutor<DType, AccReal>::kInput,
- op::batchnorm::kBeta));
+ mxnet::op::batchnorm::kBeta));
// Output
EXPECT_TRUE(compare(*info_1.executor_, *info_2.executor_,
test::op::LegacyOperatorExecutor<DType, AccReal>::kOutput,
- op::batchnorm::kOut));
+ mxnet::op::batchnorm::kOut));
CHECK_EQ(info_2.prop_->getParam().use_global_stats,
info_1.prop_->getParam().use_global_stats);
@@ -410,29 +410,29 @@ class BatchNormValidator : public test::op::Validator<DType, AccReal> {
// Aux
EXPECT_TRUE(compare(*info_1.executor_, *info_2.executor_,
test::op::LegacyOperatorExecutor<DType, AccReal>::kAux,
- op::batchnorm::kMovingMean));
+ mxnet::op::batchnorm::kMovingMean));
EXPECT_TRUE(compare(*info_1.executor_, *info_2.executor_,
test::op::LegacyOperatorExecutor<DType, AccReal>::kAux,
- op::batchnorm::kMovingVar));
+ mxnet::op::batchnorm::kMovingVar));
#endif
if (!info_2.prop_->getParam().use_global_stats) {
EXPECT_TRUE(compare(*info_1.executor_, *info_2.executor_,
test::op::LegacyOperatorExecutor<DType, AccReal>::kOutput,
- op::batchnorm::kMean));
+ mxnet::op::batchnorm::kMean));
// InGrad
EXPECT_TRUE(compare(*info_1.executor_, *info_2.executor_,
test::op::LegacyOperatorExecutor<DType, AccReal>::kInGrad,
- op::batchnorm::kData));
+ mxnet::op::batchnorm::kData));
EXPECT_TRUE(compare(*info_1.executor_, *info_2.executor_,
test::op::LegacyOperatorExecutor<DType, AccReal>::kInGrad,
- op::batchnorm::kGamma));
+ mxnet::op::batchnorm::kGamma));
EXPECT_TRUE(compare(*info_1.executor_, *info_2.executor_,
test::op::LegacyOperatorExecutor<DType, AccReal>::kInGrad,
- op::batchnorm::kBeta));
+ mxnet::op::batchnorm::kBeta));
// OutGrad
EXPECT_TRUE(compare(*info_1.executor_, *info_2.executor_,
test::op::LegacyOperatorExecutor<DType, AccReal>::kOutGrad,
- op::batchnorm::kData));
+ mxnet::op::batchnorm::kData));
}
}
};
@@ -488,16 +488,16 @@ static StreamType& dumpF(StreamType *os,
*os << "=============================" << std::endl;
}
typedef typename OperatorExecutor::BlobVectorType BlobVectorType;
- PRT(os, *prop.executor_, BlobVectorType::kInput, op::batchnorm::kData);
- PRT(os, *prop.executor_, BlobVectorType::kInput, op::batchnorm::kGamma);
- PRT(os, *prop.executor_, BlobVectorType::kInput, op::batchnorm::kBeta);
+ PRT(os, *prop.executor_, BlobVectorType::kInput, mxnet::op::batchnorm::kData);
+ PRT(os, *prop.executor_, BlobVectorType::kInput, mxnet::op::batchnorm::kGamma);
+ PRT(os, *prop.executor_, BlobVectorType::kInput, mxnet::op::batchnorm::kBeta);
- PRT(os, *prop.executor_, BlobVectorType::kAux, op::batchnorm::kMovingMean);
- PRT(os, *prop.executor_, BlobVectorType::kAux, op::batchnorm::kMovingVar);
+ PRT(os, *prop.executor_, BlobVectorType::kAux, mxnet::op::batchnorm::kMovingMean);
+ PRT(os, *prop.executor_, BlobVectorType::kAux, mxnet::op::batchnorm::kMovingVar);
- PRT(os, *prop.executor_, BlobVectorType::kOutput, op::batchnorm::kOut);
- PRT(os, *prop.executor_, BlobVectorType::kOutput, op::batchnorm::kMean);
- PRT(os, *prop.executor_, BlobVectorType::kOutput, op::batchnorm::kVar);
+ PRT(os, *prop.executor_, BlobVectorType::kOutput, mxnet::op::batchnorm::kOut);
+ PRT(os, *prop.executor_, BlobVectorType::kOutput, mxnet::op::batchnorm::kMean);
+ PRT(os, *prop.executor_, BlobVectorType::kOutput, mxnet::op::batchnorm::kVar);
}
return *os;
}
@@ -515,14 +515,14 @@ static StreamType& dumpB(StreamType *os,
}
typedef typename OperatorExecutor::BlobVectorType BlobVectorType;
- PRT(os, *prop.executor_, BlobVectorType::kInGrad, op::batchnorm::kData);
- PRT(os, *prop.executor_, BlobVectorType::kInGrad, op::batchnorm::kGamma);
- PRT(os, *prop.executor_, BlobVectorType::kInGrad, op::batchnorm::kBeta);
+ PRT(os, *prop.executor_, BlobVectorType::kInGrad, mxnet::op::batchnorm::kData);
+ PRT(os, *prop.executor_, BlobVectorType::kInGrad, mxnet::op::batchnorm::kGamma);
+ PRT(os, *prop.executor_, BlobVectorType::kInGrad, mxnet::op::batchnorm::kBeta);
- PRT(os, *prop.executor_, BlobVectorType::kAux, op::batchnorm::kMovingMean);
- PRT(os, *prop.executor_, BlobVectorType::kAux, op::batchnorm::kMovingVar);
+ PRT(os, *prop.executor_, BlobVectorType::kAux, mxnet::op::batchnorm::kMovingMean);
+ PRT(os, *prop.executor_, BlobVectorType::kAux, mxnet::op::batchnorm::kMovingVar);
- PRT(os, *prop.executor_, BlobVectorType::kOutGrad, op::batchnorm::kOut);
+ PRT(os, *prop.executor_, BlobVectorType::kOutGrad, mxnet::op::batchnorm::kOut);
}
return *os;
}
@@ -625,7 +625,7 @@ static test::op::OpInfoPair<OperatorProp1, OperatorProp2, OperatorExecutor> test
BatchNormValidator<DType, AccReal>::compare(
*info_1.executor_, *info_2.executor_,
test::op::LegacyOperatorExecutor<DType, AccReal>::kInput,
- op::batchnorm::kData, false);
+ mxnet::op::batchnorm::kData, false);
if (!thisCount) {
// return backward
@@ -671,13 +671,14 @@ testForwardAndBackward(const bool isGPU,
}
template<typename OperatorExecutor>
-static test::op::OpInfoPair<op::BatchNormV1Prop, op::BatchNormProp, OperatorExecutor>
+static test::op::OpInfoPair<mxnet::op::BatchNormV1Prop, mxnet::op::BatchNormProp, OperatorExecutor>
testBNForwardAndBackward2D(const bool isGPU,
const TShape &inputShape,
const test::op::kwargs_t kwargs,
const bool dumpC = false) {
CHECK_EQ(inputShape.ndim(), 4); // V1 can only handle 2D
- return testForwardAndBackward<op::BatchNormV1Prop, op::BatchNormProp, OperatorExecutor>(
+ return testForwardAndBackward<mxnet::op::BatchNormV1Prop,
+ mxnet::op::BatchNormProp, OperatorExecutor>(
isGPU,
isGPU,
inputShape,
@@ -708,14 +709,14 @@ TEST(BATCH_NORM, Test1DForward) {
MSHADOW_REAL_TYPE_SWITCH_EX(
type, DType, AccReal,
{
- TestBatchNormOperatorForward<op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
+ TestBatchNormOperatorForward<mxnet::op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
false, {BATCH_SIZE, CHANNELS, DW}, blank_kwargs);
});
}
}
TEST(BATCH_NORM, Test2DForwardV1) {
- TestBatchNormOperatorForward<op::BatchNormProp, BNOperatorExecutor<float, float>>(
+ TestBatchNormOperatorForward<mxnet::op::BatchNormProp, BNOperatorExecutor<float, float>>(
false,
{BATCH_SIZE, CHANNELS, DH, DW},
blank_kwargs);
@@ -726,7 +727,7 @@ TEST(BATCH_NORM, Test2DForward) {
MSHADOW_REAL_TYPE_SWITCH_EX(
type, DType, AccReal,
{
- auto opInfoFloatH = TestBatchNormOperatorForward<op::BatchNormProp,
+ auto opInfoFloatH = TestBatchNormOperatorForward<mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>>(
false, {BATCH_SIZE, CHANNELS, DH, DW}, blank_kwargs);
});
@@ -738,7 +739,7 @@ TEST(BATCH_NORM, Test3DForward) {
MSHADOW_REAL_TYPE_SWITCH_EX(
type, DType, AccReal,
{
- TestBatchNormOperatorForward<op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
+ TestBatchNormOperatorForward<mxnet::op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
false, {BATCH_SIZE, CHANNELS, DEPTH, DH, DW}, blank_kwargs);
});
}
@@ -829,7 +830,7 @@ TEST(BATCH_NORM, TestStochasticTiming_2D) {
MSHADOW_REAL_TYPE_SWITCH_EX(
mshadow::kFloat32, DType, AccReal,
{
- timingTest<op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
+ timingTest<mxnet::op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
"RANDOM: BatchNormProp<cpu>", false, true,
blank_kwargs_nocudnn, GPU_TEST_DIMENSIONS); });
#if MXNET_USE_CUDA
@@ -837,7 +838,7 @@ TEST(BATCH_NORM, TestStochasticTiming_2D) {
MSHADOW_REAL_TYPE_SWITCH_EX(
mshadow::kFloat32, DType, AccReal,
{
- timingTest<op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
+ timingTest<mxnet::op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
"RANDOM: BatchNormProp<gpu>", true, true,
blank_kwargs_nocudnn, GPU_TEST_DIMENSIONS); });
}
@@ -857,38 +858,38 @@ TEST(BATCH_NORM, TestTiming_2D) {
}
MSHADOW_REAL_TYPE_SWITCH_EX(
mshadow::kFloat32, DType, AccReal, {
- timingTest<op::BatchNormV1Prop, BNOperatorExecutor<DType, AccReal>>(
+ timingTest<mxnet::op::BatchNormV1Prop, BNOperatorExecutor<DType, AccReal>>(
"BatchNormV1Prop<cpu> 2D",
false, false,
blank_kwargs,
2, THISCOUNT);
#if defined(MXNET_USE_MKL2017) && (MXNET_USE_MKL2017 == 1)
- timingTest<op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
+ timingTest<mxnet::op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
"MKL BatchNormProp<cpu> 2D",
false, false,
blank_kwargs_nocudnn,
2, THISCOUNT);
#endif
test::ScopeSet<volatile bool> disableMKL(&mxnet::op::batchnorm::disable_mkl, true);
- timingTest<op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
+ timingTest<mxnet::op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
"BatchNormProp<cpu> 2D",
false, false,
blank_kwargs_nocudnn,
2, THISCOUNT);
#if MXNET_USE_CUDA
if (test::unitTestsWithCuda) {
- timingTest<op::BatchNormV1Prop, BNOperatorExecutor<DType, AccReal>>(
+ timingTest<mxnet::op::BatchNormV1Prop, BNOperatorExecutor<DType, AccReal>>(
"BatchNormV1Prop<gpu> 2D",
true, false,
blank_kwargs,
2, THISCOUNT);
- timingTest<op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
+ timingTest<mxnet::op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
"BatchNormProp<gpu> 2D",
true, false,
blank_kwargs_nocudnn,
2, THISCOUNT);
#if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5
- timingTest<op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
+ timingTest<mxnet::op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
"CUDNN BatchNormProp<gpu> 2D",
true, false,
blank_kwargs,
@@ -906,8 +907,8 @@ MSHADOW_REAL_TYPE_SWITCH_EX(
template<typename DType, typename AccReal>
struct BothInfo {
- test::op::OpInfo<op::BatchNormV1Prop, BNOperatorExecutor<DType, AccReal>> info_v1_;
- test::op::OpInfo<op::BatchNormProp, BNOperatorExecutor<DType, AccReal>> info_;
+ test::op::OpInfo<mxnet::op::BatchNormV1Prop, BNOperatorExecutor<DType, AccReal>> info_v1_;
+ test::op::OpInfo<mxnet::op::BatchNormProp, BNOperatorExecutor<DType, AccReal>> info_;
};
TEST(BATCH_NORM, TestBackward2D_Simple) {
@@ -916,9 +917,10 @@ TEST(BATCH_NORM, TestBackward2D_Simple) {
{
const TShape inputShape({1, 1, 2, 1});
test::op::OpInfoPair<
- op::BatchNormV1Prop, op::BatchNormProp, BNOperatorExecutor<DType, AccReal>> bi =
+ mxnet::op::BatchNormV1Prop,
+ mxnet::op::BatchNormProp, BNOperatorExecutor<DType, AccReal>> bi =
testForwardAndBackward<
- op::BatchNormV1Prop, op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
+ mxnet::op::BatchNormV1Prop, mxnet::op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
false, inputShape, blank_kwargs); // Keep it simple
});
}
@@ -946,15 +948,17 @@ TEST(BATCH_NORM, TestIterAll) {
MSHADOW_REAL_TYPE_SWITCH_EX(
type, DType, AccReal,
{
- test::op::OpInfoPair<op::BatchNormProp, op::BatchNormProp,
+ test::op::OpInfoPair<mxnet::op::BatchNormProp, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>>
- bi = testForwardAndBackward<op::BatchNormProp, op::BatchNormProp,
+ bi = testForwardAndBackward<mxnet::op::BatchNormProp,
+ mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>>(
g1 != 0, g2 != 0, shape, kwargs, false); // Keep it simple
if (shape.ndim() == 4 && type == mshadow::kFloat32 && !x3) {
- test::op::OpInfoPair<op::BatchNormV1Prop, op::BatchNormProp,
+ test::op::OpInfoPair<mxnet::op::BatchNormV1Prop, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>>
- bi = testForwardAndBackward<op::BatchNormV1Prop, op::BatchNormProp,
+ bi = testForwardAndBackward<mxnet::op::BatchNormV1Prop,
+ mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>>(
g1 != 0, g2 != 0, shape, kwargs, false); // Keep it simple
}
@@ -985,13 +989,13 @@ static void test_V1_V2_2D(const test::op::kwargs_t &kwargs, const size_t count)
TShape shapes[2] = {2, 3};
const TShape inputShape({2, 3});
- test::op::OpInfo<op::BatchNormV1Prop, BNOperatorExecutor<DType, AccReal>> info_1 =
+ test::op::OpInfo<mxnet::op::BatchNormV1Prop, BNOperatorExecutor<DType, AccReal>> info_1 =
test::op::createOpAndInfoF<
- op::BatchNormV1Prop, BNOperatorExecutor<DType, AccReal>>(
+ mxnet::op::BatchNormV1Prop, BNOperatorExecutor<DType, AccReal>>(
kwargs, gpu_V1, inputShape);
- test::op::OpInfo<op::BatchNormProp, BNOperatorExecutor<DType, AccReal>> info_2 =
- test::op::createOpAndInfoF<op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
+ test::op::OpInfo<mxnet::op::BatchNormProp, BNOperatorExecutor<DType, AccReal>> info_2 =
+ test::op::createOpAndInfoF<mxnet::op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
kwargs, gpu_V2, inputShape);
info_1.executor_->initForward(*info_1.prop_, &info_1.in_type_);
@@ -999,7 +1003,7 @@ static void test_V1_V2_2D(const test::op::kwargs_t &kwargs, const size_t count)
info_1.executor_->initBackward(*info_1.prop_, &info_1.in_type_);
info_2.executor_->initBackward(*info_1.prop_, &info_1.in_type_);
- TBlob &blob1 = info_1.executor_->inputs()[op::batchnorm::kData];
+ TBlob &blob1 = info_1.executor_->inputs()[mxnet::op::batchnorm::kData];
test::data_ref<DType>(&blob1, {0, 0}) = -0.05f;
test::data_ref<DType>(&blob1, {0, 1}) = -0.19f;
test::data_ref<DType>(&blob1, {0, 2}) = 0.02f;
@@ -1007,7 +1011,7 @@ static void test_V1_V2_2D(const test::op::kwargs_t
&kwargs, const size_t count)
test::data_ref<DType>(&blob1, {1, 1}) = 0.06f;
test::data_ref<DType>(&blob1, {1, 2}) = -0.01f;
- TBlob &blob2 = info_2.executor_->inputs()[op::batchnorm::kData];
+ TBlob &blob2 = info_2.executor_->inputs()[mxnet::op::batchnorm::kData];
test::data_ref<DType>(&blob2, {0, 0}) = -0.05f;
test::data_ref<DType>(&blob2, {0, 1}) = -0.19f;
test::data_ref<DType>(&blob2, {0, 2}) = 0.02f;
@@ -1015,11 +1019,11 @@ static void test_V1_V2_2D(const test::op::kwargs_t &kwargs, const size_t count)
test::data_ref<DType>(&blob2, {1, 1}) = 0.06f;
test::data_ref<DType>(&blob2, {1, 2}) = -0.01f;
- test::data_ref<DType>(&info_1.executor_->inputs()[op::batchnorm::kGamma], {1}) = 3;
- test::data_ref<DType>(&info_2.executor_->inputs()[op::batchnorm::kGamma], {1}) = 3;
+ test::data_ref<DType>(&info_1.executor_->inputs()[mxnet::op::batchnorm::kGamma], {1}) = 3;
+ test::data_ref<DType>(&info_2.executor_->inputs()[mxnet::op::batchnorm::kGamma], {1}) = 3;
- test::data_ref<DType>(&info_1.executor_->inputs()[op::batchnorm::kBeta], {0}) = 3;
- test::data_ref<DType>(&info_2.executor_->inputs()[op::batchnorm::kBeta], {0}) = 3;
+ test::data_ref<DType>(&info_1.executor_->inputs()[mxnet::op::batchnorm::kBeta], {0}) = 3;
+ test::data_ref<DType>(&info_2.executor_->inputs()[mxnet::op::batchnorm::kBeta], {0}) = 3;
for (size_t x = 0; x < count; ++x) {
info_1.executor_->forward();
@@ -1052,9 +1056,9 @@ TEST(BATCH_NORM, TestBackward2D_SimpleNFG) {
mshadow::kFloat32, DType, AccReal,
{
const TShape inputShape({1, 1, 2, 1});
- test::op::OpInfoPair<op::BatchNormV1Prop, op::BatchNormProp,
+ test::op::OpInfoPair<mxnet::op::BatchNormV1Prop, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>> bi =
- testForwardAndBackward<op::BatchNormV1Prop, op::BatchNormProp,
+ testForwardAndBackward<mxnet::op::BatchNormV1Prop, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>>(
false, inputShape, nonfixgamma_kwargs);
});
@@ -1066,9 +1070,9 @@ TEST(BATCH_NORM, Test2DBackward_Complex) {
{
test::ScopeSet<bool> noDebugOutput(&test::debug_output, false);
const TShape inputShape({9, 14, 16, 91});
- test::op::OpInfoPair<op::BatchNormV1Prop, op::BatchNormProp,
+ test::op::OpInfoPair<mxnet::op::BatchNormV1Prop, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>> bi =
- testForwardAndBackward<op::BatchNormV1Prop, op::BatchNormProp,
+ testForwardAndBackward<mxnet::op::BatchNormV1Prop, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>>(
false, inputShape, blank_kwargs);
});
@@ -1078,9 +1082,9 @@ struct Test2DBackward2DPlusLoadAndCompareLogicUtil {
template <typename DType, typename AccReal>
static void test() {
const TShape inputShape({1, 1, 2, 1});
- test::op::OpInfoPair<op::BatchNormV1Prop, op::BatchNormProp,
+ test::op::OpInfoPair<mxnet::op::BatchNormV1Prop, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>> bi =
- testForwardAndBackward<op::BatchNormV1Prop, op::BatchNormProp,
+ testForwardAndBackward<mxnet::op::BatchNormV1Prop, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>>(
false, inputShape, blank_kwargs, false, 1, 5);
@@ -1116,8 +1120,8 @@ struct Test2DBackward2DPlusLoadAndCompareLogicUtil {
// Expected data state when running forward+backward starting with default values
// Note: This data structure generated by dumpC()
// Test loaded data agsinst calculated data
- test::op::OpInfo<op::BatchNormProp, BNOperatorExecutor<DType, AccReal>> info_checkLoad =
- test::op::createOpAndInfoF<op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
+ test::op::OpInfo<mxnet::op::BatchNormProp, BNOperatorExecutor<DType, AccReal>> info_checkLoad =
+ test::op::createOpAndInfoF<mxnet::op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
blank_kwargs, false, inputShape);
info_checkLoad.executor_->initForward(*info_checkLoad.prop_, &info_checkLoad.in_type_);
info_checkLoad.executor_->initBackward(*info_checkLoad.prop_, &info_checkLoad.in_type_);
@@ -1159,8 +1163,8 @@ TEST(BATCH_NORM, TestBackward1D_Simple) {
mshadow::kFloat32, DTypeX, AccReal,
{
const TShape inputShape({1, 1, 2});
- test::op::OpInfo<op::BatchNormProp, BNOperatorExecutor<DTypeX, AccReal>> info =
- TestBatchNormOperatorForward<op::BatchNormProp, BNOperatorExecutor<DTypeX, AccReal>>(
+ test::op::OpInfo<mxnet::op::BatchNormProp, BNOperatorExecutor<DTypeX, AccReal>> info =
+ TestBatchNormOperatorForward<mxnet::op::BatchNormProp, BNOperatorExecutor<DTypeX, AccReal>>(
false, inputShape, blank_kwargs);
info.executor_->initBackward(*info.prop_, &info.in_type_);
runOperatorBackward(&info);
@@ -1207,8 +1211,8 @@ TEST(BATCH_NORM, TestBackward3D) {
mshadow::kFloat32, DType, AccReal,
{
const TShape inputShape({2, 3, 2, 3, 5});
- test::op::OpInfo<op::BatchNormProp, BNOperatorExecutor<DType, AccReal>> info =
- TestBatchNormOperatorForward<op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
+ test::op::OpInfo<mxnet::op::BatchNormProp, BNOperatorExecutor<DType, AccReal>> info =
+ TestBatchNormOperatorForward<mxnet::op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
false, inputShape, blank_kwargs);
info.executor_->initBackward(*info.prop_, &info.in_type_);
runOperatorBackward(&info);
@@ -1225,9 +1229,9 @@ TEST(BATCH_NORM, Test2DBackwardMixed_cpu_cpu_nfg) {
mshadow::kFloat32, DType, AccReal,
{
const TShape inputShape({1, 1, 2, 1});
- test::op::OpInfoPair<op::BatchNormV1Prop, op::BatchNormProp,
+ test::op::OpInfoPair<mxnet::op::BatchNormV1Prop, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>> bi =
- testForwardAndBackward<op::BatchNormV1Prop, op::BatchNormProp,
+ testForwardAndBackward<mxnet::op::BatchNormV1Prop, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>>(
false, false, inputShape, nonfixgamma_kwargs, false);
dumpF(&std::cout, bi);
@@ -1241,9 +1245,9 @@ TEST(BATCH_NORM, Test2DBackwardMixed_cpu_cpu_ugs) {
mshadow::kFloat32, DType, AccReal,
{
const TShape inputShape({1, 1, 2, 1});
- test::op::OpInfoPair<op::BatchNormV1Prop, op::BatchNormProp,
+ test::op::OpInfoPair<mxnet::op::BatchNormV1Prop, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>> bi =
- testForwardAndBackward<op::BatchNormV1Prop, op::BatchNormProp,
+ testForwardAndBackward<mxnet::op::BatchNormV1Prop, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>>(
false, false, inputShape, useglobalstats_kwargs, false);
dumpF(&std::cout, bi);
@@ -1504,16 +1508,16 @@ static void runChannelAxisTest(
// Create operator 1 with ChannelAxis2 (normally the experimental one)
kwargs.push_back({"axis", std::to_string(channelAxis1)});
- test::op::OpInfo<op::BatchNormProp, BNOperatorExecutor<DType, AccReal>> info_c1 =
+ test::op::OpInfo<mxnet::op::BatchNormProp, BNOperatorExecutor<DType, AccReal>> info_c1 =
test::op::createOpAndInfoF<
- op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
+ mxnet::op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
kwargs, isGPU1, shape_c1);
// Create operator 2 with ChannelAxis2 (normally the control one)
kwargs.pop_back();
kwargs.push_back({"axis", std::to_string(channelAxis2)});
- test::op::OpInfo<op::BatchNormProp, BNOperatorExecutor<DType, AccReal>> info_c2 =
- test::op::createOpAndInfoF<op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
+ test::op::OpInfo<mxnet::op::BatchNormProp, BNOperatorExecutor<DType, AccReal>> info_c2 =
+ test::op::createOpAndInfoF<mxnet::op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
kwargs, isGPU2, shape_c2);
kwargs.pop_back();
@@ -1643,11 +1647,11 @@ TEST(BATCH_NORM, Test2DForwardV12D_gpu) {
MSHADOW_REAL_TYPE_SWITCH_EX(
mshadow::kFloat32, DType, AccReal,
{
- TestBatchNormOperatorForward<op::BatchNormV1Prop, BNOperatorExecutor<DType, AccReal>>(
+ TestBatchNormOperatorForward<mxnet::op::BatchNormV1Prop, BNOperatorExecutor<DType, AccReal>>(
true,
{BATCH_SIZE, CHANNELS, DH, DW},
blank_kwargs);
- TestBatchNormOperatorForward<op::BatchNormV1Prop, BNOperatorExecutor<DType, AccReal>>(
+ TestBatchNormOperatorForward<mxnet::op::BatchNormV1Prop, BNOperatorExecutor<DType, AccReal>>(
true,
{BATCH_SIZE, CHANNELS, DH, DW},
blank_kwargs);
@@ -1659,11 +1663,11 @@ TEST(BATCH_NORM, Test2DForward2D_gpu) {
MSHADOW_REAL_TYPE_SWITCH_EX(
type, DType, AccReal,
{
- TestBatchNormOperatorForward<op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
+ TestBatchNormOperatorForward<mxnet::op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
true,
{BATCH_SIZE, CHANNELS, DH, DW},
blank_kwargs);
- TestBatchNormOperatorForward<op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
+ TestBatchNormOperatorForward<mxnet::op::BatchNormProp, BNOperatorExecutor<DType, AccReal>>(
true,
{BATCH_SIZE, CHANNELS, DH, DW},
blank_kwargs_nocudnn);
@@ -1677,7 +1681,7 @@ TEST(BATCH_NORM, Test2DBackwardMixedV1_gpu_cpu) {
mshadow::kFloat32, DType, AccReal,
{
const TShape inputShape({1, 1, 2, 1});
- testForwardAndBackward<op::BatchNormV1Prop, op::BatchNormV1Prop,
+ testForwardAndBackward<mxnet::op::BatchNormV1Prop, mxnet::op::BatchNormV1Prop,
BNOperatorExecutor<DType, AccReal>>(
false, true, inputShape, blank_kwargs, false);
});
@@ -1688,7 +1692,7 @@ TEST(BATCH_NORM, Test2DBackwardMixedV1Complex_gpu_cpu) {
mshadow::kFloat32, DType, AccReal,
{
const TShape inputShape({BATCH_SIZE, CHANNELS, DH, DW});
- testForwardAndBackward<op::BatchNormV1Prop, op::BatchNormV1Prop,
+ testForwardAndBackward<mxnet::op::BatchNormV1Prop, mxnet::op::BatchNormV1Prop,
BNOperatorExecutor<DType, AccReal>>(
false, true, inputShape, blank_kwargs, false);
});
@@ -1700,10 +1704,10 @@ TEST(BATCH_NORM, Test2DBackwardMixed_gpu_cpu) {
type, DType, AccReal,
{
const TShape inputShape({1, 1, 2, 1});
- testForwardAndBackward<op::BatchNormProp, op::BatchNormProp,
+ testForwardAndBackward<mxnet::op::BatchNormProp, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>>(
false, true, inputShape, blank_kwargs, false);
- testForwardAndBackward<op::BatchNormProp, op::BatchNormProp,
+ testForwardAndBackward<mxnet::op::BatchNormProp, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>>(
false, true, inputShape, blank_kwargs_nocudnn, false);
});
@@ -1716,10 +1720,10 @@ TEST(BATCH_NORM, Test2DBackwardMixedComplex_gpu_cpu) {
type, DType, AccReal,
{
const TShape inputShape({BATCH_SIZE, CHANNELS, DH, DW});
- testForwardAndBackward<op::BatchNormProp, op::BatchNormProp,
+ testForwardAndBackward<mxnet::op::BatchNormProp, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>>(
false, true, inputShape, blank_kwargs, false);
- testForwardAndBackward<op::BatchNormProp, op::BatchNormProp,
+ testForwardAndBackward<mxnet::op::BatchNormProp, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>>(
false, true, inputShape, blank_kwargs_nocudnn, false);
});
@@ -1734,7 +1738,7 @@ TEST(BATCH_NORM, Test2DBackwardMixedV1V2Complex_cpu_cpu_nfg) {
mshadow::kFloat32, DType, AccReal,
{
const TShape inputShape({BATCH_SIZE, CHANNELS, DH, DW});
- testForwardAndBackward<op::BatchNormV1Prop, op::BatchNormProp,
+ testForwardAndBackward<mxnet::op::BatchNormV1Prop, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>>(
false, false, inputShape, nonfixgamma_kwargs, false);
});
@@ -1746,10 +1750,10 @@ TEST(BATCH_NORM, Test2DBackwardMixed_gpu_cpu_nfg) {
type, DType, AccReal,
{
const TShape inputShape({1, 1, 2, 1});
- testForwardAndBackward<op::BatchNormProp, op::BatchNormProp,
+ testForwardAndBackward<mxnet::op::BatchNormProp, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>>(
false, true, inputShape, nonfixgamma_kwargs, false);
- testForwardAndBackward<op::BatchNormProp, op::BatchNormProp,
+ testForwardAndBackward<mxnet::op::BatchNormProp, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>>(
false, true, inputShape, nonfixgamma_kwargs_nocudnn, false);
});
@@ -1762,10 +1766,10 @@ TEST(BATCH_NORM, Test2DBackwardMixedComplex_gpu_cpu_nfg) {
type, DType, AccReal,
{
const TShape inputShape({BATCH_SIZE, CHANNELS, DH, DW});
- testForwardAndBackward<op::BatchNormProp, op::BatchNormProp,
+ testForwardAndBackward<mxnet::op::BatchNormProp, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>>(
false, true, inputShape, nonfixgamma_kwargs, false);
- testForwardAndBackward<op::BatchNormProp, op::BatchNormProp,
+ testForwardAndBackward<mxnet::op::BatchNormProp, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>>(
false, true, inputShape, nonfixgamma_kwargs_nocudnn, false);
});
@@ -1780,9 +1784,9 @@ TEST(BATCH_NORM, Test2DBackwardMixedV1V2Complex_cpu_cpu_ugs) {
mshadow::kFloat32, DType, AccReal,
{
const TShape inputShape({BATCH_SIZE, CHANNELS, DH, DW});
- test::op::OpInfoPair<op::BatchNormV1Prop, op::BatchNormProp,
+ test::op::OpInfoPair<mxnet::op::BatchNormV1Prop, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>> bi =
- testForwardAndBackward<op::BatchNormV1Prop, op::BatchNormProp,
+ testForwardAndBackward<mxnet::op::BatchNormV1Prop, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>>(
false, false, inputShape, useglobalstats_kwargs, false);
dumpF(&std::cout, bi);
@@ -1796,10 +1800,10 @@ TEST(BATCH_NORM, Test2DBackwardMixed_gpu_cpu_ugs) {
type, DType, AccReal,
{
const TShape inputShape({2, 3, 2, 2});
- testForwardAndBackward<op::BatchNormProp, op::BatchNormProp,
+ testForwardAndBackward<mxnet::op::BatchNormProp, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>>(
false, true, inputShape, useglobalstats_kwargs_nocudnn, false);
- testForwardAndBackward<op::BatchNormProp, op::BatchNormProp,
+ testForwardAndBackward<mxnet::op::BatchNormProp, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>>(
false, true, inputShape, useglobalstats_kwargs, false);
});
@@ -1812,10 +1816,10 @@ TEST(BATCH_NORM, Test2DBackwardMixedComplex_gpu_cpu_ugs) {
type, DType, AccReal,
{
const TShape inputShape({BATCH_SIZE, CHANNELS, DH, DW});
- testForwardAndBackward<op::BatchNormProp, op::BatchNormProp,
+ testForwardAndBackward<mxnet::op::BatchNormProp, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>>(
false, true, inputShape, useglobalstats_kwargs, false);
- testForwardAndBackward<op::BatchNormProp, op::BatchNormProp,
+ testForwardAndBackward<mxnet::op::BatchNormProp, mxnet::op::BatchNormProp,
BNOperatorExecutor<DType, AccReal>>(
false, true, inputShape, useglobalstats_kwargs_nocudnn, false);
});
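The batchnorm changes above are mechanical: every unqualified op::... becomes mxnet::op::.... The failure mode being fixed is plain C++ name lookup: the test harness keeps its own nested op namespace (test::op::Validator, test::op::OpInfo, and so on), and from inside that tree an unqualified op:: binds to the nearest enclosing op rather than to mxnet::op. A minimal sketch of the collision follows; the namespace layout is illustrative, not copied from the MXNet sources:

#include <iostream>

namespace mxnet {
namespace op {
struct BatchNormProp {};  // stand-in for the real operator property
}  // namespace op

namespace test {
namespace op {
struct Validator {};  // the test harness's own 'op' namespace
}  // namespace op

void demo() {
  // Unqualified lookup walks outward from mxnet::test and finds
  // mxnet::test::op first, so the next line would not compile:
  //   op::BatchNormProp prop;  // error: no 'BatchNormProp' in mxnet::test::op
  mxnet::op::BatchNormProp prop;  // fully qualified: unambiguous everywhere
  (void)prop;
  std::cout << "resolved against mxnet::op" << std::endl;
}
}  // namespace test
}  // namespace mxnet

int main() {
  mxnet::test::demo();
  return 0;
}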
diff --git a/tests/cpp/operator/tune/operator_tune_test.cc b/tests/cpp/operator/tune/operator_tune_test.cc
index 60096d3..f404e4f 100644
--- a/tests/cpp/operator/tune/operator_tune_test.cc
+++ b/tests/cpp/operator/tune/operator_tune_test.cc
@@ -31,7 +31,8 @@ using namespace mxnet;
* \brief ActivationOp timing test for CPU
*/
TEST(OMP_TUNING, ShowAllTunedOps) {
- const std::unordered_set<std::string>& op_names = op::OperatorTune<float>::TunedOperatorNames();
+ const std::unordered_set<std::string>& op_names =
+ mxnet::op::OperatorTune<float>::TunedOperatorNames();
for (auto iter = op_names.begin(), e_iter = op_names.end(); iter != e_iter; ++iter) {
std::cout << *iter << std::endl;
}
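A stylistic aside on the loop above: nothing in it needs the explicit begin()/end() iterator pair, so a range-for expresses the same traversal more directly. A self-contained sketch; TunedOperatorNames() below is a hypothetical stand-in for mxnet::op::OperatorTune<float>::TunedOperatorNames():

#include <iostream>
#include <string>
#include <unordered_set>

// Hypothetical stand-in for the tuned-operator registry the test queries.
static const std::unordered_set<std::string>& TunedOperatorNames() {
  static const std::unordered_set<std::string> names{"relu", "sigmoid", "tanh"};
  return names;
}

int main() {
  // Same traversal as the explicit iterator pair in the test; iteration
  // order of an unordered_set is unspecified either way.
  for (const std::string& name : TunedOperatorNames()) {
    std::cout << name << std::endl;
  }
  return 0;
}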
--
To stop receiving notification emails like this one, please contact
"[email protected]" <[email protected]>.