Posted to commits@mxnet.apache.org by zh...@apache.org on 2018/06/25 23:35:29 UTC
[incubator-mxnet] branch master updated: Fix flaky test test_operator.test_binary_op due to numerical errors (#11259)
This is an automated email from the ASF dual-hosted git repository.
zhasheng pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git
The following commit(s) were added to refs/heads/master by this push:
new 21ff36b Fix flaky test test_operator.test_binary_op due to numerical errors (#11259)
21ff36b is described below
commit 21ff36b06bf47ff2ac4145ce60ec1fe5dd14ce1d
Author: Pedro Larroy <92...@users.noreply.github.com>
AuthorDate: Mon Jun 25 16:35:23 2018 -0700
Fix flaky test test_operator.test_binary_op due to numerical errors (#11259)
Use float64 computations, as the reference numpy implementation operates in double precision, not float.
f64(f32(f64(.))) % f64(f32(f64(.))) is not the same as f64(.) % f64(.) due to limited precision.
fixes #9853
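
A minimal numpy sketch of the effect (not part of the commit; the values 0.3 and 0.1 are chosen only because they make the float32 round-trip visible):

    import numpy as np

    # '%' evaluated entirely in double precision:
    x64 = np.float64(0.3) % np.float64(0.1)                          # ~0.09999999999999998

    # The same operands squeezed through float32 first, i.e.
    # f64(f32(f64(.))) % f64(f32(f64(.))) from the commit message:
    x32 = np.float64(np.float32(0.3)) % np.float64(np.float32(0.1))  # ~7.45e-09

    # float32 rounding nudges the operands across an exact multiple of the
    # divisor, flipping the integer quotient, so the two remainders differ
    # by almost the whole divisor rather than by a few ULPs.
    print(x64, x32)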
---
tests/python/unittest/test_operator.py | 50 ++++++++++++++++++++++++----------
1 file changed, 36 insertions(+), 14 deletions(-)
diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py
index fbd3886..287d830 100644
--- a/tests/python/unittest/test_operator.py
+++ b/tests/python/unittest/test_operator.py
@@ -1550,6 +1550,7 @@ def gen_broadcast_data_int(idx):
 def gen_binary_data(dummy):
     ndim = np.random.randint(1, 6)
     shape = np.random.randint(1, 6, size=(ndim,))
+    #print("gen shape {}".format(shape))
     return [np.random.random(shape), np.random.random(shape)]
@@ -1562,27 +1563,46 @@ def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx
     sample_num = 200
     for i in range(sample_num):
         d = gen_data(i)
-        x = baseline(d[0], d[1])
         y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])})
         y.forward(is_train=True)
         y = y.outputs[0].asnumpy()
+        x = baseline(d[0], d[1]).astype(y.dtype)
+
+        #np.set_printoptions(precision=20)
+
+        a = d[0]
+        b = d[1]
+        #print("a: {} {}".format(a.dtype, a))
+        #print("b: {} {}".format(b.dtype, b))
+
+        #print("x: {} {}".format(x.dtype, x))
+        #print("y: {} {}".format(y.dtype, y))
         if mx_nd_func is not None:
             d0 = mx.nd.array(d[0], dtype=d[0].dtype)
             d1 = mx.nd.array(d[1], dtype=d[1].dtype)
             assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol)
         idx = np.abs(x-y) > atol+rtol*np.abs(x)
         if idx.any():
-            print('found precision problem')
+            import binascii
+            np.set_printoptions(precision=20)
+            logging.error('found precision problem:')
             d[0] = np.broadcast_to(d[0], x.shape)
             d[1] = np.broadcast_to(d[1], x.shape)
-            print('a: {}'.format(d[0][idx]))
-            print('b: {}'.format(d[1][idx]))
-            import struct
-            print('a hex: {}'.format(struct.pack('d', d[0][idx]).encode('hex')))
-            print('b hex: {}'.format(struct.pack('d', np.broadcast_to(d[1], x.shape)[idx]).encode('hex')))
-            print('in baseline(a, b): {}'.format(x[idx]))
-            print('in symbol(a, b): {}'.format(y[idx]))
-            print('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx]))
+            logging.error('input a: {}'.format(d[0][idx]))
+            logging.error('input b: {}'.format(d[1][idx]))
+            logging.error("output x: {} {}".format(x.dtype, x))
+            logging.error("output y: {} {}".format(y.dtype, y))
+            def ftohex(xs):
+                import struct
+                return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten()))
+            logging.error('output x in baseline(a, b): {}'.format(x[idx]))
+            logging.error('output y in symbol(a, b): {}'.format(y[idx]))
+            logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx])))
+            logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx])))
+            logging.error('input a hex: {}'.format(ftohex(d[0][idx])))
+            logging.error('input b hex: {}'.format(ftohex(d[1][idx])))
+
+            logging.error('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx]))
         assert_allclose(y, x, rtol=rtol, atol=atol)
@@ -1641,10 +1661,13 @@ def test_binary_op():
     check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data)

     def test_bmod(a, b):
-        c = a % b
+        # Python and numpy operate only in double precision, so to avoid numerical
+        # errors we have to use doubles here as well. This test was flaky with
+        # float32 (seeds 1688524483 and 1768433044).
+        #c = a % b
+        c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64')
         # '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
-        # Issue exposed with seed 1768433044
-        check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data)
+        #check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data)
+        check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data, rtol=0, atol=0)
         check_binary_op_backward(c,
                                  lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
@@ -1675,7 +1698,6 @@ def test_binary_op():
     test_bpow(a, b)
     test_bneq(a, b)

-
 @with_seed()
 def test_broadcast_binary_op():
     def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol):
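
For reference, the hex-dump helper added in the patch can be exercised on its own. A standalone sketch (variable names hypothetical), showing how struct.pack plus binascii.hexlify pins down the exact IEEE-754 bits of each value, which is what the new logging relies on; the removed .encode('hex') idiom it replaces is Python-2-only:

    import binascii
    import struct

    import numpy as np

    def ftohex(xs):
        # Hex-dump each element's raw IEEE-754 double bit pattern, in native
        # byte order, mirroring the ftohex helper in the patch above.
        return [binascii.hexlify(struct.pack('d', x)) for x in xs.flatten()]

    baseline = np.array([np.float64(0.3) % np.float64(0.1)])
    result = np.array([np.float64(np.float32(0.3)) % np.float64(np.float32(0.1))])

    # Printed at default precision, a mismatch can be ambiguous; the hex dump
    # makes it unambiguous down to the last ULP in the failure log.
    print(ftohex(baseline))
    print(ftohex(result))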