This is an automated email from the ASF dual-hosted git repository.
zhasheng pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git
The following commit(s) were added to refs/heads/master by this push:
new ab30cf8 [MXNET-8230] test_operator_gpu.test_rms fails (#11749)
ab30cf8 is described below
commit ab30cf803fbd3d448f6537601c15278446ecfaf8
Author: Frank Liu <[email protected]>
AuthorDate: Tue Jul 17 20:45:17 2018 -0700
[MXNET-8230] test_operator_gpu.test_rms fails (#11749)
* Fix flaky test#8230, test_rms fails.
* Address code review comments.
---
tests/python/unittest/test_optimizer.py | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/tests/python/unittest/test_optimizer.py
b/tests/python/unittest/test_optimizer.py
index a5b3d40..fdf7d27 100644
--- a/tests/python/unittest/test_optimizer.py
+++ b/tests/python/unittest/test_optimizer.py
@@ -835,8 +835,7 @@ class PyRMSProp(mx.optimizer.Optimizer):
if self.clip_weights:
mx.ndarray.clip(weight, -self.clip_weights, self.clip_weights,
out=weight)
[email protected]("Test fails intermittently. Temporarily disabled until fixed.
Tracked at https://github.com/apache/incubator-mxnet/issues/8230")
-@with_seed(0)
+@with_seed()
def test_rms():
opt1 = PyRMSProp
opt2 = mx.optimizer.RMSProp
@@ -848,6 +847,9 @@ def test_rms():
wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]
mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]
for dtype in [np.float16, np.float32]:
+            # Relax floating point comparison tolerance to avoid flaky test failure.
+ rtol, atol = (1e-1, 1e-1) if dtype is np.float16 else (1e-2, 1e-2)
+
for cw_option in cw_options:
for cg_option in cg_options:
for center_option in center_options:
@@ -865,9 +867,9 @@ def test_rms():
('multi_precision' not in kwarg or
not kwarg['multi_precision'])):
continue
- compare_optimizer(opt1(**kwarg),
opt2(**kwarg), shape, dtype)
+ compare_optimizer(opt1(**kwarg),
opt2(**kwarg), shape, dtype, rtol=rtol, atol=atol)
if (default_context() == mx.cpu()):
- compare_optimizer(opt1(**kwarg),
opt2(**kwarg), shape, dtype, g_stype='row_sparse')
+ compare_optimizer(opt1(**kwarg),
opt2(**kwarg), shape, dtype, g_stype='row_sparse', rtol=rtol, atol=atol)
class PyFtrl(mx.optimizer.Optimizer):
"""The Ftrl optimizer.