Posted to commits@mxnet.apache.org by sa...@apache.org on 2020/10/07 21:59:14 UTC

[incubator-mxnet] branch master updated: Numpy polyval large tensor fix (#19306)

This is an automated email from the ASF dual-hosted git repository.

samskalicky pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new 510900f  Numpy polyval large tensor fix (#19306)
510900f is described below

commit 510900fa204381be4c25d567cf126f1dd3407a0b
Author: Zhaoqi Zhu <zh...@gmail.com>
AuthorDate: Wed Oct 7 14:57:19 2020 -0700

    Numpy polyval large tensor fix (#19306)
    
    * fix
    
    * tweak
---
 src/operator/numpy/np_polynomial_op-inl.h |  2 +-
 src/operator/numpy/np_polynomial_op.cc    |  4 ++--
 tests/nightly/test_np_large_array.py      | 18 ++++++++++++++++++
 3 files changed, 21 insertions(+), 3 deletions(-)
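
The one-line signature changes below are small, but the failure mode they fix is easy to miss: each kernel's Map function receives the flat element index, and a tensor with more than 2^31 - 1 elements overflows a 32-bit int index. A standalone sketch of the wraparound (plain C++, not MXNet code; it assumes a 64-bit index_t, as in MXNet's large-tensor builds):

    // Standalone sketch: a 32-bit loop index cannot address every element
    // of a tensor holding more than INT_MAX entries.
    #include <cstdint>
    #include <iostream>

    int main() {
      const int64_t num_elements = (1LL << 31) + 1;      // 2,147,483,649 elements
      int     i32 = static_cast<int>(num_elements - 1);  // wraps to a negative value
      int64_t i64 = num_elements - 1;                    // holds the index exactly
      std::cout << i32 << "\n";  // typically prints -2147483648
      std::cout << i64 << "\n";  // prints 2147483648
      return 0;
    }

Switching the index parameter from int to index_t, as the hunks below do, removes exactly this wraparound.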

diff --git a/src/operator/numpy/np_polynomial_op-inl.h b/src/operator/numpy/np_polynomial_op-inl.h
index f3b4424..a2cc044 100644
--- a/src/operator/numpy/np_polynomial_op-inl.h
+++ b/src/operator/numpy/np_polynomial_op-inl.h
@@ -55,7 +55,7 @@ inline bool NumpyPolyvalShape(const nnvm::NodeAttrs& attrs,
 template<int req>
 struct polyval_forward {
   template<typename DType>
-  MSHADOW_XINLINE static void Map(int i,
+  MSHADOW_XINLINE static void Map(index_t i,
                                   DType* out_data,
                                   const DType* p_data,
                                   const DType* x_data,
diff --git a/src/operator/numpy/np_polynomial_op.cc b/src/operator/numpy/np_polynomial_op.cc
index 72df77c..3fc9439 100644
--- a/src/operator/numpy/np_polynomial_op.cc
+++ b/src/operator/numpy/np_polynomial_op.cc
@@ -31,7 +31,7 @@ namespace op {
 template<int req>
 struct polyval_backward_x {
   template<typename DType>
-  MSHADOW_XINLINE static void Map(int i, const DType* p_dptr, const DType* x_dptr,
+  MSHADOW_XINLINE static void Map(index_t i, const DType* p_dptr, const DType* x_dptr,
                                   DType* igrad_x_dptr, const DType* ograd_dptr,
                                   const index_t p_size) {
     DType igrad_x = 0;
@@ -47,7 +47,7 @@ struct polyval_backward_x {
 template<int req>
 struct polyval_backward_p {
   template<typename DType>
-  MSHADOW_XINLINE static void Map(int i, const DType* p_dptr, const DType* x_dptr,
+  MSHADOW_XINLINE static void Map(index_t i, const DType* p_dptr, const DType* x_dptr,
                                   DType* igrad_p_dptr, const DType* ograd_dptr,
                                   const index_t p_size, const index_t x_size) {
     DType igrad_p = 0;
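
All three kernels (polyval_forward, polyval_backward_x, polyval_backward_p) are element-wise over the output, so widening the Map index is the entire fix. For orientation, here is a simplified sketch of the forward pattern; the MSHADOW_XINLINE and req-template machinery is stripped out, and the Horner-style loop is an illustration of what a polyval kernel does rather than the literal MXNet implementation:

    // Simplified sketch, assuming index_t is a 64-bit integer as in
    // large-tensor builds; not the real MXNet kernel.
    #include <cstdint>
    using index_t = int64_t;

    template <typename DType>
    struct polyval_forward_sketch {
      // i is the flat index of one output element; as an index_t it can
      // exceed INT_MAX, which is what this commit enables.
      static void Map(index_t i, DType* out_data, const DType* p_data,
                      const DType* x_data, const index_t p_size) {
        DType acc = 0;
        for (index_t j = 0; j < p_size; ++j) {
          acc = acc * x_data[i] + p_data[j];  // Horner's rule, highest coefficient first
        }
        out_data[i] = acc;
      }
    };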
diff --git a/tests/nightly/test_np_large_array.py b/tests/nightly/test_np_large_array.py
index bda45b0..37cab0d 100644
--- a/tests/nightly/test_np_large_array.py
+++ b/tests/nightly/test_np_large_array.py
@@ -1200,6 +1200,24 @@ def test_subtract():
     assert B.grad.shape == (INT_OVERFLOW, 2)
     assert B.grad[0][0] == -1
 
+@use_np
+def test_polyval():
+    poly = np.array([1, 1, 5])
+    inp = np.zeros((2, INT_OVERFLOW))
+    inp[-1, -1] = 2
+    poly.attach_grad()
+    inp.attach_grad()
+    with mx.autograd.record():
+        out = np.polyval(poly, inp)
+        out.backward()
+    assert out.shape == inp.shape
+    assert out[-1, -1] == 11 and out[0, 0] == 5
+    assert inp.grad.shape == inp.shape
+    assert inp.grad[-1, -1] == 5
+    assert poly.grad.shape == poly.shape
+    assert poly.grad[0] == 4
+
+
 '''
                                      _               _
   _ _ _  _ _ __  _ __ _  _   _____ _| |_ ___ _ _  __(_)___ _ _
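
The expected values in the new test follow directly from poly = [1, 1, 5], i.e. p(x) = x^2 + x + 5: the forward pass gives p(2) = 11 and p(0) = 5; dp/dx = 2x + 1 gives 5 at the single nonzero input; and the gradient of the summed output with respect to the leading coefficient is the sum of x^2 over all inputs, which is 2^2 = 4 since every other input is zero. A throwaway check of that arithmetic (standalone C++, independent of MXNet):

    // Hand-check of the numbers the test asserts; not MXNet code.
    // poly = [1, 1, 5] encodes p(x) = x^2 + x + 5.
    #include <cassert>

    int main() {
      auto p  = [](long long x) { return x * x + x + 5; };  // forward value
      auto dp = [](long long x) { return 2 * x + 1; };      // dp/dx
      assert(p(2) == 11);   // out[-1, -1]
      assert(p(0) == 5);    // out[0, 0]
      assert(dp(2) == 5);   // inp.grad[-1, -1]
      assert(2 * 2 == 4);   // poly.grad[0]: only one input is nonzero (x = 2)
      return 0;
    }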