Posted to commits@mxnet.apache.org by ap...@apache.org on 2020/03/03 06:03:56 UTC

[incubator-mxnet] branch master updated: [Large Tensor] Fix multi_lars op (#17675)

This is an automated email from the ASF dual-hosted git repository.

apeforest pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new 9ed5a38  [Large Tensor] Fix multi_lars op (#17675)
9ed5a38 is described below

commit 9ed5a38651b1c59c671039e7310d2c5b4a0f7094
Author: Connor Goggins <cg...@gmail.com>
AuthorDate: Mon Mar 2 22:02:10 2020 -0800

    [Large Tensor] Fix multi_lars op (#17675)
    
    * Switched dtype for index param i
    
    * Added nightly test for multi_lars
    
    * Value check to trigger lazy evaluation
---
 src/operator/contrib/multi_lars-inl.h |  2 +-
 tests/nightly/test_large_array.py     | 18 ++++++++++++++++++
 2 files changed, 19 insertions(+), 1 deletion(-)
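
The kernel index change below only takes effect when MXNet is built with 64-bit
tensor sizes (USE_INT64_TENSOR_SIZE=1). A minimal sketch for checking that at
runtime, assuming the flag is exposed as 'INT64_TENSOR_SIZE' through
mxnet.runtime.Features:

    from mxnet.runtime import Features

    # True only if libmxnet was compiled with USE_INT64_TENSOR_SIZE=1,
    # i.e. index_t is 64 bits wide and ops can address > 2**31 - 1 elements.
    print(Features().is_enabled('INT64_TENSOR_SIZE'))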

diff --git a/src/operator/contrib/multi_lars-inl.h b/src/operator/contrib/multi_lars-inl.h
index c78bd70..3e9bebe 100644
--- a/src/operator/contrib/multi_lars-inl.h
+++ b/src/operator/contrib/multi_lars-inl.h
@@ -59,7 +59,7 @@ struct LARSParam : public dmlc::Parameter<LARSParam> {
 };
 
 struct MultiLARSKernel {
-  MSHADOW_XINLINE static void Map(int i, float* out_data, const float* lrs,
+  MSHADOW_XINLINE static void Map(index_t i, float* out_data, const float* lrs,
                                   const float* weights_sum_sq, const float* grads_sum_sq,
                                   const float* wds, const float eta, const float eps,
                                   const float rescale_grad, const OpReqType req) {
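
For context, index_t is mshadow's index type and is 64 bits wide in large-tensor
builds, whereas the previous plain int index wraps once the flattened element
index passes 2**31 - 1. A rough illustration of that boundary (Python, not part
of the patch):

    # Illustrative only: a first dimension beyond the 32-bit signed range
    # cannot be addressed by a plain `int` index.
    INT32_MAX = 2**31 - 1          # 2,147,483,647
    num_rows = 2**32               # a row count beyond the 32-bit signed range
    print(num_rows > INT32_MAX)    # True -> `int i` overflows; index_t (int64) does not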
diff --git a/tests/nightly/test_large_array.py b/tests/nightly/test_large_array.py
index 222c452..cc1977a 100644
--- a/tests/nightly/test_large_array.py
+++ b/tests/nightly/test_large_array.py
@@ -514,6 +514,23 @@ def test_nn():
         assert res.shape[1] == SMALL_Y
         assert res[0][SMALL_Y - 1] == 50.
 
+    def check_multi_lars():
+        lrs = nd.random_normal(shape=(LARGE_TENSOR_SHAPE + 1, 1))
+        weights_sum_sq = nd.random_normal(shape=(LARGE_TENSOR_SHAPE + 1, 1))
+        grads_sum_sq = nd.random_normal(shape=(LARGE_TENSOR_SHAPE + 1, 1))
+        wds = nd.random_normal(shape=(LARGE_TENSOR_SHAPE + 1, 1))
+        eta = .1
+        eps = .9
+
+        out = nd.multi_lars(lrs=lrs, weights_sum_sq=weights_sum_sq, grads_sum_sq=grads_sum_sq,
+                            wds=wds, eta=eta, eps=eps)
+
+        assert out.shape[0] == LARGE_TENSOR_SHAPE + 1
+        assert out.shape[1] == 1
+
+        # Trigger lazy evaluation of the output NDArray and ensure that it has been filled
+        assert type(out[0, 0].asscalar()).__name__ == 'float32'
+
     check_gluon_embedding()
     check_fully_connected()
     check_dense()
@@ -538,6 +555,7 @@ def test_nn():
     check_spatial_transformer()
     check_ravel()
     check_cumsum()
+    check_multi_lars()
 
 
 def test_tensor():
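
The new check_multi_lars test relies on MXNet executing operators
asynchronously: nd.multi_lars only enqueues the kernel, and pulling a value out
of the result is what forces it to run, so a broken index type fails at that
point. A minimal usage sketch along the same lines (small shapes and names here
are illustrative, not part of the patch):

    import mxnet as mx
    from mxnet import nd

    n = 8  # the nightly test instead uses a first dimension in the large-tensor range
    lrs            = nd.random_normal(shape=(n, 1))
    weights_sum_sq = nd.random_normal(shape=(n, 1))
    grads_sum_sq   = nd.random_normal(shape=(n, 1))
    wds            = nd.random_normal(shape=(n, 1))

    out = nd.multi_lars(lrs=lrs, weights_sum_sq=weights_sum_sq,
                        grads_sum_sq=grads_sum_sq, wds=wds, eta=0.1, eps=0.9)

    # The call above is asynchronous; blocking reads (wait_to_read, asscalar,
    # asnumpy) force the kernel to execute so any runtime error surfaces here.
    out.wait_to_read()
    print(out[0, 0].asscalar())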