Posted to commits@mxnet.apache.org by sk...@apache.org on 2018/08/15 16:23:48 UTC
[incubator-mxnet] branch master updated: Removed fixed seed and
increased learning rate and tolerance for test_nadam (#12164)
This is an automated email from the ASF dual-hosted git repository.
skm pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git
The following commit(s) were added to refs/heads/master by this push:
new 5b9251b Removed fixed seed and increased learning rate and tolerance for test_nadam (#12164)
5b9251b is described below
commit 5b9251baf0666d9c5d0cdc6b208699b70224cd04
Author: access2rohit <sr...@osu.edu>
AuthorDate: Wed Aug 15 09:23:32 2018 -0700
Removed fixed seed and increased learning rate and tolerance for test_nadam (#12164)
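For context, @with_seed() without an argument lets the test harness pick a fresh seed on each run instead of always reusing 1234, so the test no longer depends on one lucky random draw. A minimal sketch of such a decorator is below; this is an illustration only, not MXNet's actual helper (the real with_seed in the test utilities also logs the chosen seed so failures can be reproduced).

import functools
import random
import numpy as np
import mxnet as mx

def with_seed(seed=None):
    def decorator(test_fn):
        @functools.wraps(test_fn)
        def wrapper(*args, **kwargs):
            # Use the fixed seed if one was given, otherwise draw a new one
            # so each run exercises different random data.
            this_seed = seed if seed is not None else random.getrandbits(31)
            np.random.seed(this_seed)
            mx.random.seed(this_seed)
            return test_fn(*args, **kwargs)
        return wrapper
    return decorator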
---
tests/python/unittest/test_optimizer.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/tests/python/unittest/test_optimizer.py b/tests/python/unittest/test_optimizer.py
index bdd71ee..449cdb4 100644
--- a/tests/python/unittest/test_optimizer.py
+++ b/tests/python/unittest/test_optimizer.py
@@ -943,7 +943,7 @@ def test_ftrl():
compare_optimizer(opt1(lazy_update=True, **kwarg), opt2(**kwarg), shape,
np.float32, w_stype='row_sparse', g_stype='row_sparse')
-@with_seed(1234)
+@with_seed()
def test_nadam():
def get_net(num_hidden, flatten=True):
@@ -965,10 +965,10 @@ def test_nadam():
loss = Loss(output, l)
loss = mx.sym.make_loss(loss)
mod = mx.mod.Module(loss, data_names=('data',), label_names=('label',))
- mod.fit(data_iter, num_epoch=60, optimizer_params={'learning_rate': 0.0005, 'wd': 0.0005},
+ mod.fit(data_iter, num_epoch=60, optimizer_params={'learning_rate': 0.001, 'wd': 0.0005},
initializer=mx.init.Xavier(magnitude=2), eval_metric=mx.metric.Loss(),
optimizer='nadam')
- assert mod.score(data_iter, eval_metric=mx.metric.Loss())[0][1] < 0.1
+ assert mod.score(data_iter, eval_metric=mx.metric.Loss())[0][1] < 0.11
# AdaGrad
class PyAdaGrad(mx.optimizer.Optimizer):
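For reference, the optimizer the test exercises is Nadam (Nesterov-accelerated Adam). A rough numpy sketch of a single update step, assuming the standard Nadam formulation, is below; MXNet's built-in 'nadam' optimizer additionally applies a momentum schedule and the wd (weight decay) term set in optimizer_params, so this is not its exact implementation.

import numpy as np

def nadam_step(w, grad, m, v, t, lr=0.001, beta1=0.9, beta2=0.999, eps=1e-8):
    # Exponential moving averages of the gradient and its square.
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * grad ** 2
    # Bias-corrected estimates.
    m_hat = m / (1 - beta1 ** t)
    v_hat = v / (1 - beta2 ** t)
    # Nesterov correction: blend the corrected momentum with the current gradient.
    m_nesterov = beta1 * m_hat + (1 - beta1) * grad / (1 - beta1 ** t)
    w = w - lr * m_nesterov / (np.sqrt(v_hat) + eps)
    return w, m, v

With a learning rate of 0.001 (as set in the updated test) the loss after 60 epochs is expected to fall under the relaxed 0.11 threshold across random seeds, which is the point of the tolerance change.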