You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@mxnet.apache.org by zh...@apache.org on 2020/08/27 15:06:39 UTC
[incubator-mxnet] branch master updated: Numpy Pooling and ROI
Pooling Large Dimension Checks (#19013)
This is an automated email from the ASF dual-hosted git repository.
zhasheng pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git
The following commit(s) were added to refs/heads/master by this push:
new 6b59605 Numpy Pooling and ROI Pooling Large Dimension Checks (#19013)
6b59605 is described below
commit 6b596050e7d82ee2cc09d066273bd8900284566e
Author: Zhaoqi Zhu <zh...@usc.edu>
AuthorDate: Thu Aug 27 08:05:30 2020 -0700
Numpy Pooling and ROI Pooling Large Dimension Checks (#19013)
* add dimension checks
* add checks to pooling, roi_pooling and update tests
* fix sanity
* fix
* sanity
* sanity
Co-authored-by: Zhu <zh...@3c22fbbb4e1a.ant.amazon.com>
Co-authored-by: Ubuntu <ub...@ip-172-31-38-169.us-west-2.compute.internal>
---
src/operator/nn/pooling.cc | 5 ++++
src/operator/roi_pooling-inl.h | 5 ++++
tests/nightly/test_np_large_array.py | 48 +++++++++++++++++++++++-------------
3 files changed, 41 insertions(+), 17 deletions(-)
diff --git a/src/operator/nn/pooling.cc b/src/operator/nn/pooling.cc
index 4c66f2c..d2edcc5 100644
--- a/src/operator/nn/pooling.cc
+++ b/src/operator/nn/pooling.cc
@@ -117,6 +117,11 @@ static bool PoolingShape(const nnvm::NodeAttrs &attrs,
<< " Or 4D in (batch, channel, y, x) "
<< " Or 5D in (batch, channel, d, y, x)";
+ for (int i = 0; i < dshape.ndim(); i++) {
+ CHECK_LT(dshape[i], INT32_MAX) << "Pooling does not support large"
+ << " dimensions (>= 2^31).";
+ }
+
int layout = param.GetLayout(dshape.ndim());
if (param.global_pool) {
mxnet::TShape oshape = dshape;
diff --git a/src/operator/roi_pooling-inl.h b/src/operator/roi_pooling-inl.h
index 438d1e8..89a59ab 100644
--- a/src/operator/roi_pooling-inl.h
+++ b/src/operator/roi_pooling-inl.h
@@ -177,6 +177,11 @@ class ROIPoolingProp : public OperatorProperty {
mxnet::TShape dshape = in_shape->at(roipool::kData);
CHECK_EQ(dshape.ndim(), 4U) << "data should be a 4D tensor";
+ for (int i = 0; i < dshape.ndim(); i++) {
+ CHECK_LT(dshape[i], INT32_MAX) << "ROI Pooling does not support large"
+ << " dimensions (>= 2^31).";
+ }
+
// bbox: [num_rois, 5]
mxnet::TShape bshape = in_shape->at(roipool::kBox);
CHECK_EQ(bshape.ndim(), 2U) << "bbox should be a 2D tensor of shape [batch, 5]";
diff --git a/tests/nightly/test_np_large_array.py b/tests/nightly/test_np_large_array.py
index 692b29e..20b762a 100644
--- a/tests/nightly/test_np_large_array.py
+++ b/tests/nightly/test_np_large_array.py
@@ -29,7 +29,8 @@ from mxnet.test_utils import rand_ndarray, assert_almost_equal, rand_coord_2d, d
from mxnet import gluon, np, npx
from common import with_seed
import pytest
-
+from tests.python.unittest.common import assertRaises
+from mxnet.base import MXNetError
# dimension constants
MEDIUM_X = 10000
@@ -1004,33 +1005,46 @@ def test_dlpack():
assert C[0][100] == 101
@use_np
-@pytest.mark.skip(reason='broken on large tensors')
-#TODO add 3d pooling test after large tensor is fixed
def test_pooling():
- A = np.ones((1, 2, INT_OVERFLOW))
- A[0][0][2] = 100
+ def test_pooling_large_dim():
+ A = np.ones((1, 1, INT_OVERFLOW))
+ assertRaises(MXNetError, npx.pooling, data=A, kernel=(2), stride=(2), \
+ pool_type='max')
+
+ test_pooling_large_dim()
+ D, H, W = 2**12, 2**10, 2**10
+ A = np.ones((1, 1, D, H, W))
+ A[0, 0, 0, 0, 2] = 100
A.attach_grad()
with mx.autograd.record():
- B = npx.pooling(data=A, kernel=(2), stride=2, pool_type='max')
- assert B.shape == (1, 2, HALF_INT_OVERFLOW)
- assert B[0][0][1] == 100
+ B = npx.pooling(data=A, kernel=(2, 2, 2), stride=(2, 2, 2), \
+ pool_type='max')
+ assert B.shape == (1, 1, int(D/2), int(H/2), int(W/2))
+ assert B[0, 0, 0, 0, 1] == 100
B.backward()
- assert A.grad.shape == (1, 2, INT_OVERFLOW)
- assert A.grad[0][0][0] == 1
+ assert A.grad.shape == (1, 1, D, H, W)
+ assert A.grad[0, 0, 0, 0, 0] == 1
@use_np
-@pytest.mark.skip(reason='forward gives wrong value on large tensor')
def test_roi_pooling():
- A = np.ones((1, 1, 5, INT_OVERFLOW))
- A[0][0][0][2] = 100
- roi = np.array([[0, 0, 0, 3, 3]])
+ def test_roi_pooling_large_dim():
+ A = np.ones((1, 1, INT_OVERFLOW, 5))
+ roi = np.array([[0, 0, 0, 5, 5]])
+ assertRaises(MXNetError, npx.roi_pooling, A, roi, pooled_size=(3, 3), \
+ spatial_scale=1)
+
+ test_roi_pooling_large_dim()
+ H, W = 2**16, 2**16
+ A = np.ones((1, 1, H, W))
+ A[0, 0, 0, 2] = 100
+ roi = np.array([[0, 0, 0, 5, 5]])
A.attach_grad()
with mx.autograd.record():
- B = npx.roi_pooling(A, roi, pooled_size=(2, 2), spatial_scale=1)
- assert B.shape == (1, 1, 2, 2)
+ B = npx.roi_pooling(A, roi, pooled_size=(3, 3), spatial_scale=1)
+ assert B.shape == (1, 1, 3, 3)
assert B[0][0][0][1] == 100
B.backward()
- assert A.grad.shape == (1, 1, 5, INT_OVERFLOW)
+ assert A.grad.shape == (1, 1, H, W)
assert A.grad[0][0][0][0] == 1
@use_np