Posted to commits@mxnet.apache.org by zh...@apache.org on 2020/10/13 18:48:35 UTC

[incubator-mxnet] branch master updated: change int to index_t (#19326)

This is an automated email from the ASF dual-hosted git repository.

zhasheng pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new 191341f  change int to index_t (#19326)
191341f is described below

commit 191341f0a848f8c6840d4562fa56929eca949e13
Author: Zhaoqi Zhu <zh...@gmail.com>
AuthorDate: Tue Oct 13 11:46:28 2020 -0700

    change int to index_t (#19326)
---
 src/operator/tensor/matrix_op-inl.h  |  6 +++---
 tests/nightly/test_np_large_array.py | 13 +++++++++++++
 2 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/src/operator/tensor/matrix_op-inl.h b/src/operator/tensor/matrix_op-inl.h
index 52a60a5..7bc623b 100644
--- a/src/operator/tensor/matrix_op-inl.h
+++ b/src/operator/tensor/matrix_op-inl.h
@@ -443,7 +443,7 @@ struct TransposeExKernel {
   * \param ndim     the number of dimensions
    */
   template <typename DType>
-  MSHADOW_XINLINE static void Map(int tid,
+  MSHADOW_XINLINE static void Map(index_t tid,
       DType *out_data,
       const DType *in_data,
       const dim_t *strides,
@@ -451,8 +451,8 @@ struct TransposeExKernel {
       ) {
     // tid is the index of input data
     const dim_t* const out_strides = strides + ndim;
-    int k = tid;
-    int out_id = 0;
+    index_t k = tid;
+    index_t out_id = 0;
     for (int i = 0; i < ndim; ++i) {
       out_id += (k / strides[i]) * out_strides[i];
       k %= strides[i];
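
Why the widening matters: Map() is invoked once per element of the
flattened tensor, so tid must be able to count past INT32_MAX once a
tensor holds more than 2**31 - 1 elements; a 32-bit int wraps and the
kernel reads and writes the wrong offsets. A minimal standalone sketch
of the failure mode (plain C++, not MXNet code; it assumes index_t is
a 64-bit signed integer, as in large-tensor builds of mshadow):

    #include <cstdint>
    #include <cstdio>

    // Stand-in for mshadow's index_t in a large-tensor build
    // (assumption: index_t is int64_t when MSHADOW_INT64_TENSOR_SIZE=1).
    using index_t = int64_t;

    int main() {
      // One element past INT32_MAX -- what the INT_OVERFLOW constant
      // (2**31) in tests/nightly/test_np_large_array.py denotes.
      const index_t num_elements = index_t{1} << 31;

      // Converting an out-of-range value to int32_t truncates,
      // typically wrapping to INT32_MIN, so a 32-bit tid can never
      // address the tail of the tensor.
      const int32_t tid32 = static_cast<int32_t>(num_elements);  // wraps
      const index_t tid64 = num_elements;                        // exact

      std::printf("int32 tid:   %d\n", tid32);
      std::printf("index_t tid: %lld\n", static_cast<long long>(tid64));
      return 0;
    }
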
diff --git a/tests/nightly/test_np_large_array.py b/tests/nightly/test_np_large_array.py
index d08be67..22e5987 100644
--- a/tests/nightly/test_np_large_array.py
+++ b/tests/nightly/test_np_large_array.py
@@ -1886,3 +1886,16 @@ def test_array_split():
     assert out[0][0][0] == 0
     assert out[1][-1][-1] == 2
 
+
+@use_np
+def test_rollaxis():
+    inp = np.zeros((1, 1, 2, INT_OVERFLOW, 1))
+    inp[-1, -1, -1, -1, -1] = 1
+    inp.attach_grad()
+    with mx.autograd.record():
+        out = np.rollaxis(inp, 3)
+        out.backward()
+    assert out.shape == (INT_OVERFLOW, 1, 1, 2, 1)
+    assert out[-1, -1, -1, -1, -1] == 1
+    assert inp.grad.shape == inp.shape
+    assert inp.grad[-1, -1, -1, -1, -1] == 1
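
For context on the arithmetic tid feeds: the kernel converts each
input-linear index into an output offset by peeling off one axis per
iteration with the input strides and re-accumulating with the output
strides, so the intermediates k and out_id must also be 64-bit. A
small host-side sketch of that remapping on a 2x3 transpose
(illustrative shapes and strides, not MXNet code):

    #include <cstdint>
    #include <cstdio>

    using dim_t   = int64_t;
    using index_t = int64_t;  // matches the widened Map() signature

    // Mirror of the loop body in TransposeExKernel::Map above: map an
    // input-linear index tid to the corresponding output offset.
    index_t remap(index_t tid, const dim_t* strides,
                  const dim_t* out_strides, int ndim) {
      index_t k = tid;
      index_t out_id = 0;
      for (int i = 0; i < ndim; ++i) {
        out_id += (k / strides[i]) * out_strides[i];
        k %= strides[i];
      }
      return out_id;
    }

    int main() {
      // Transposing a row-major 2x3 matrix: input strides are {3, 1};
      // input axis 0 lands in the output with stride 1, axis 1 with
      // stride 2, so element (r, c) moves to offset 2*c + r.
      const dim_t strides[]     = {3, 1};
      const dim_t out_strides[] = {1, 2};
      for (index_t tid = 0; tid < 6; ++tid)
        std::printf("in %lld -> out %lld\n",
                    static_cast<long long>(tid),
                    static_cast<long long>(remap(tid, strides,
                                                 out_strides, 2)));
      return 0;
    }

The new test_rollaxis case above drives this same path at scale: the
input shape (1, 1, 2, INT_OVERFLOW, 1) holds 2**32 elements, more than
any 32-bit index can enumerate, and the gradient assertions confirm
the backward pass remaps correctly as well.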