Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2020/12/08 22:49:47 UTC

[GitHub] [incubator-mxnet] Zha0q1 commented on a change in pull request #19294: Numpy large tensor test batch 4

Zha0q1 commented on a change in pull request #19294:
URL: https://github.com/apache/incubator-mxnet/pull/19294#discussion_r525617370



##########
File path: tests/nightly/test_np_large_array.py
##########
@@ -542,6 +564,113 @@ def test_slice_assign():
     B[-1] = 2
     assert B[-1, 0] == 2 and B[-1, 1] == 2
 
+@use_np
+def test_flatnonzero():
+    inp = np.zeros((2, INT_OVERFLOW))
+    inp[-1, -1] = 1
+    inp.attach_grad()
+    with mx.autograd.record():
+        out = np.flatnonzero(inp)
+        out.backward()
+    assert out.shape == (1, )
+    assert out[0] == int(2 * INT_OVERFLOW - 1)
+    assert inp.grad.shape == inp.shape
+    assert inp.grad[-1, -1] == 0
+
+@use_np
+def test_ravel():
+    inp = np.zeros((2, INT_OVERFLOW))
+    inp[0, -1], inp[-1, -1] = 1, 2
+    inp.attach_grad()
+    with mx.autograd.record():
+        out = np.ravel(inp)
+        out.backward()
+    assert out.shape == (DOUBLE_INT_OVERFLOW, )
+    assert out[INT_OVERFLOW-1] == 1 and out[-1] == 2
+    assert inp.grad.shape == inp.shape
+    assert inp.grad[-1, -1] == 1
+
+@use_np
+def test_mean():
+    inp = np.arange(DOUBLE_INT_OVERFLOW).reshape((2, INT_OVERFLOW))
+    inp.attach_grad()
+    with mx.autograd.record():
+        out = np.mean(inp, axis=1)
+        out.backward()
+    assert out.shape == (2, )
+    assert_almost_equal(out[0], np.array((HALF_INT_OVERFLOW-0.5)), \
+                rtol=1e-3, atol=1e-5)
+    assert inp.grad.shape == inp.shape
+    assert_almost_equal(inp.grad[-1, -1], np.array((1.0/INT_OVERFLOW)), \
+                rtol=1e-3, atol=1e-5)
+
+@use_np
+def test_median():
+    inp = np.arange(DOUBLE_INT_OVERFLOW).reshape((2, INT_OVERFLOW))
+    inp.attach_grad()
+    with mx.autograd.record():
+        out = np.median(inp, axis=1)
+        out.backward()
+    assert out.shape == (2, )
+    assert_almost_equal(out[0], np.array((HALF_INT_OVERFLOW-0.5)), \
+                rtol=1e-3, atol=1e-5)
+    assert inp.grad.shape == inp.shape
+    assert inp.grad[-1, -1] == 0
+
+@use_np
+def test_percentile():
+    # np.percentile and np.quantile share the same implementation
+    inp = np.arange(DOUBLE_INT_OVERFLOW).reshape((2, INT_OVERFLOW))
+    inp.attach_grad()
+    with mx.autograd.record():
+        out = np.percentile(inp, 50, axis=1)
+        out.backward()
+    assert out.shape == (2, )
+    assert_almost_equal(out[0], np.array((HALF_INT_OVERFLOW-0.5)), \
+                rtol=1e-3, atol=1e-5)
+    assert inp.grad.shape == inp.shape
+    assert inp.grad[-1, -1] == 0
+
+@use_np
+def test_shares_memory():
+    # np.shares_memory and np.may_share_memory share the same implementation
+    inp = np.ones((2, INT_OVERFLOW))
+    out = np.shares_memory(inp[0, :100], inp[0, 100:])
+    out2 = np.shares_memory(inp[1, :101], inp[1, 100:])
+    assert out == False and out2 == True
+
+@use_np
+def test_where():
+    inp1 = np.zeros((2, INT_OVERFLOW))
+    inp1[-1, -1] = 1
+    inp2 = inp1 + 1
+    inp1.attach_grad()
+    inp2.attach_grad()
+    with mx.autograd.record():
+        out = np.where(inp1==0, inp1, inp2)
+        out.backward()
+    assert out.shape == inp1.shape
+    assert out[0, 0] == 0 and out[-1, -1] == 2
+    assert inp1.grad.shape == inp1.shape
+    assert inp1.grad[0, 0] == 1 and inp1.grad[-1, -1] == 0
+    assert inp2.grad.shape == inp2.shape
+    assert inp2.grad[0, 0] == 0 and inp2.grad[-1, -1] == 1
+    # one side is scalar
+    with mx.autograd.record():
+        out = np.where(inp1==0, inp1, 2)
+        out.backward()
+    assert out.shape == inp1.shape
+    assert out[0, 0] == 0 and out[-1, -1] == 2
+    assert inp1.grad.shape == inp1.shape
+    assert inp1.grad[0, 0] == 1 and inp1.grad[-1, -1] == 0
+    # both sides are scalar
+    with mx.autograd.record():
+        out = np.where(inp1==0, 0, 2)

Review comment:
       Sure
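
For readers without the full file at hand: these tests exercise operators on tensors whose flattened length exceeds the signed 32-bit integer range. The size constants are defined at the top of tests/nightly/test_np_large_array.py and do not appear in this hunk; the sketch below assumes the usual definitions, so treat the exact values as an assumption rather than a quote from the PR.

    # Assumed definitions (not shown in this hunk), illustrative only.
    INT_OVERFLOW = 2**31          # smallest length that no longer fits in a signed int32
    HALF_INT_OVERFLOW = 2**30
    DOUBLE_INT_OVERFLOW = 2**32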

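Every test in this batch follows the same attach_grad / record / backward pattern; here is a minimal small-scale sketch of that pattern (hypothetical shapes, not the nightly sizes), using test_flatnonzero as the model:

    import mxnet as mx
    from mxnet import np, npx
    npx.set_np()

    inp = np.zeros((2, 8))
    inp[-1, -1] = 1
    inp.attach_grad()
    with mx.autograd.record():
        out = np.flatnonzero(inp)
        out.backward()
    assert out[0] == 15            # flat index of element (1, 7) in a 2x8 array
    assert inp.grad[-1, -1] == 0   # flatnonzero propagates no gradient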

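The where() cases additionally check that gradient flows only to the branch that was actually selected; the same behavior at small scale (a sketch, not the PR's code):

    import mxnet as mx
    from mxnet import np, npx
    npx.set_np()

    x = np.zeros((2, 4))
    x[-1, -1] = 1
    y = x + 1
    x.attach_grad()
    y.attach_grad()
    with mx.autograd.record():
        out = np.where(x == 0, x, y)   # take x where x == 0, else y
        out.backward()
    assert x.grad[0, 0] == 1 and x.grad[-1, -1] == 0
    assert y.grad[0, 0] == 0 and y.grad[-1, -1] == 1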

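Likewise, the shares_memory assertions reduce to a simple slice-overlap check, shown here scaled down (illustrative sizes):

    from mxnet import np, npx
    npx.set_np()

    a = np.ones((2, 8))
    assert not np.shares_memory(a[0, :4], a[0, 4:])   # disjoint slices
    assert np.shares_memory(a[0, :5], a[0, 4:])       # both cover index 4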

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org