Posted to commits@mxnet.apache.org by re...@apache.org on 2019/07/17 08:51:52 UTC

[incubator-mxnet] branch numpy updated (d8d6b3b -> 7da1a11)

This is an automated email from the ASF dual-hosted git repository.

reminisce pushed a change to branch numpy
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


    omit d8d6b3b  Numpy Tensordot Operator  (#15349)
    omit a26b201  [Numpy] Numpy hstack (#15302)
    omit 3ea5f8c  numpy eye op (#15282)
    omit 19c7c6e  fix memory override bug in multinomial (#15397)
    omit 1bff19b  [Numpy] Numpy compatible argsort (#15501)
    omit 546f3e8  Numpy Trace (#15258)
    omit e9b73e1  [numpy][doc-fix] zeros_like, linspace, reciprocal, square, and arcsin (#15377)
    omit 44e5b25  [numpy][doc-fix] mean, transpose, stack, split, log2, rint and radians (#15370)
    omit 81747cb  [numpy][doc-fix] sum, copy, tile, argmax, sign, log, degrees (#15382)
    omit 052f90d  [numpy] fix cython (#15418)
    omit d60e105  [numpy] Fix several places in numpy (#15398)
    omit 4dfb7b9  add doc for multinomial, dot, cumsum, clip, abs, exp, arctan (#15386)
    omit 6b7525c  [numpy] Change d2l chapters cv and gan to use numpy (#15368)
    omit a563b56  [numpy] Misc fix for other chapters (#15332)
    omit f803dde  numpy-compatible cumsum (#15309)
    omit 2260667  Numpy compatible linspace (#15256)
    omit 1b62426  Numpy compatible multinomial (#15219)
    omit 3409578  Numpy compatible max (#15161)
    omit 96520cb  [numpy] Fix d2l chapter 5 (#15264)
    omit bf4dc33  [numpy] [DO NOT MERGE] Fix d2l chapters 9 and 13 (#15246)
    omit 93679fe  Numpy-compatible split (#15049)
    omit a00a2ce  fix for ch11 (#15244)
    omit fb10e28  [numpy] Fix d2l chapter8 (#15237)
    omit 4ad56dc  fix for chapter6 conv nn (#15224)
    omit 4e1274f  Fix (#15188)
    omit df47457  [numpy] Fix d2l performance regression (#15173)
    omit 517451b  [WIP][numpy] Fix for D2L Chapters 2/3/4 (#15139)
    omit aa5153f  numpy concatenate (#15104)
    omit 0010c43  [numpy] Fix np branch after rebase (#15086)
    omit ac92ec3  Numpy Unary Ops (#15010)
    omit 9911675  Numpy-compatible stack (#15027)
    omit 877e0e9  Temporarily disable test_amp
    omit 6e33cc1  Change np_compat to np_shape
    omit 308bcc6  [numpy] Refactor np module (example runs through) (#15055)
    omit 043f01e  [numpy] Refactor np modules (#14989)
    omit 0ee508e  [numpy] Some np ops for d2l (#14924)
    omit 4325676  numpy-compatible mean (#14859)
    omit 7a12654  [numpy] Numpy dot (#14831)
    omit 355254e  Enable np op compat check with name prefix (#14897)
    omit 2f71241  [numpy] Infra for supporting numpy ops in imperative mode and Gluon APIs (#14758)
    omit ffd690d  [Do not review] [Do not merge] New numpy-compatible sum (#14739)
     add 28c528e  [clojure] clojurify function names in image.clj namespace (#15121)
     add 134a3e8  fix nightly (#15141)
     add 910583e  fix misspell (#15149)
     add c474f5f  Update env_var.md (#15153)
     add 3f4f3d5  fix kvstore nightly failure (#15156)
     add 6c00a5a  update LICENSE (#15128)
     add 77da1e2  add cast_dtype option to load parameters (#15168)
     add eaacbaf  Fix Scala release (#15155)
     add 0337451  Python test failures under windows now report the exit code (#15147)
     add bcff498  reenable two unit tests (#15116)
     add 745a41c  [MXNET-1416] Fix inception inference example for potential index out of range error. (#15179)
     add c5874dd  [BUGFIX] Fix a bug in Auto Function. (#15184)
     add e01c386  min fix of 'storage_type' index mismatch (#14560)
     add 21d7ac0  Fix installation dependencies (#14987)
     add 99f8fc9  don't check for nullptr before deleting; closes #14580 (#14901)
     add 8d8c5d5  fix graident of boolean mask (#15175)
     add b64e00a  Fix wrong description of output range of ToTensor (#14794)
     add 3c82ce2  [MXNET-978] Second order gradient support for some unary operators (#14613)
     add 8b2ae57  Fix MKLDNNDataReorderAsync (#15163)
     add 1c3e964  fix the cmake cmd with wrong path (#15111)
     add 62a85f3  Broken link fixes for the website (#15205)
     add d32a3da  [MKLDNN] Fix quantized act and concat (#15209)
     add 29fea48  Readd Java HelloWorld example for intellij tutorial (#15217)
     add 769b882  Add missing file to Maven clean (#15216)
     add 2e20094  Fixed a bug in Gluon DataLoader. (#15195)
     add 579c5ab  Docs fixes (NDabs, Proposal, MultiProposal) (#15185)
     add 027b547  Bugfix: accept NN as an inter_method for augmentation in ImageRecordI… (#15221)
     add b2e6dac  fix imresize interp docs (#15066)
     add 0be6d7e  Add an utility for operator benchmarks (#14977)
     add e5902ec  Update float16 tutorial (#15107)
     add a862270  Make MXNDArrayFromDLPack backward compatible (#15227)
     add 09202f7f Improve static cached_op optimization (#15187)
     add b58cf0a  Updated Image Augmentation tutorial to use Gluon Transforms. (#15197)
     add b8b352d  Cleaned up profiling tutorial (#15228)
     add 41d35c4  [TUTORIAL] Add multiple GPUs training tutorial (#15158)
     add 3b663ef  [MXNET-1415]Add MXEnginePushAsyncND and MXEnginePushSyncND C APIs (#15177)
     add 13cf5db  Profiler API Enhancements (#15132)
     add 45844b2  Sort benchmark output per op name, add some validation to CLI (#15249)
     add 5b1603f  Fix nightly build warning (#15248)
     add c4ea674  [Dependency Update] Bump up cuDNN & NCCL version (#15142)
     add 85aaa3a  Fix horovod build failure when mxnet is built from source (#15213)
     add cab1dfa  Upgrade archive utility and add back FC improvement (#15171)
     add d7e2139  [MXNET-1417][Performance] Caching Dynamic Shape Checking Result (#15262)
     add ccbbf6b  Fix java install docs (#15250)
     add 6f60b9b  fix span issue on tutorial index (#15279)
     add eb48370  Added transform tutorial (#15114)
     add 145f82d  Updating SymbolBlock.imports to support different dtypes (#15230)
     add 4d96671  fixing var-seq-len rnn backward() operator (#15278)
     add 2b7fbc5  [bug] fix higher grad log  (#15120)
     add 12c4226  update committer info (#15289)
     add 2de0db0  Showing proper error when csr array is not 2D in shape. (#15242)
     add c45d23b  Proper bulking of ops not using FCompute (#15272)
     add 4a9e9f6  Typo fix in plan_memory relase -> release. (#15299)
     add 3f8fd00  Fixing duplication in operator profiling (#15240)
     add 8b5f376  [MXNET-1413] Adding Large Tensor support for sort operators (#15170)
     add b4ce4e7  improve layernorm CPU performance (#15313)
     add e6fad30  Efficient MXNet sampling in the multinomial distribution (#15311)
     add 0340536  indent changes (#15321)
     add f44f6cf  Extend Clojure BERT example (#15023)
     add 7fe478a  Fix build_ccache_wrappers: (#14631)
     add 51acd4d  [MXNET-1086] added sub and mul to ONNX->TensorRT conversion (#15344)
     add 009907a  [C++] Improve inference script to support benchmark on Imagenet (#15164)
     add ba30644  [DOC] Clarify that global pooling is going to reset padding (#15269)
     add 582489c  Fix Cached_op with static_shape=true (#15298)
     add cd19367  add 'asnumpy' dtype option to check_symbolic_backward (#15186)
     add 92fce90  Custom Operator Profiling Enhancement (#15210)
     add e8f3e91  [Opperf] Make module/namespace of the operator parameterized (#15226)
     add 8aaacde  Update sparse_retain Documentation (#15394)
     add 11e6d45  [AMP] Move topk from FP16_FP32_FUNCS to FP32_FUNCS (#15342)
     add ca565a0  Conversion from FP32 model to Mixed Precision model (#15118)
     add 06df38c  point fix the vector declaration in MultiBoxDetection (#15300)
     add b869ecd  [Clojure] Add fastText example (#15340)
     add d74b993  Revert default return type for indices in argsort() and topk() back to float32 (#15360)
     add 7210cc4  nano instructions (#15117)
     add c6bb2ce  Use omp threads for cpu data loader (#15379)
     add 512a491  Temporarily Commenting out Flaky Test (#15436)
     add 1547578  Remove mhard-float option. This is already deprecated by Google. (#15435)
     add 6a8d9eb  [MXNET-978] Higher order gradient for sigmoid (#15288)
     add 3df3e2c  [TUTORIAL] Gluon performance tips and tricks (#15427)
     add d49445f  Updating profiler tutorial to include new custom operator profiling (#15403)
     add 5078853  remove comments from nano instructions (#15433)
     add fc54781  enable TensorRT integration with cpp api (#15335)
     add faccc59  Fix memory leak in NaiveEngine (#15405)
     add d7c542a  [TUTORIAL] Gluon and Sparse NDArray (#15396)
     add 8ebaa5c  REAME   MTCNN   Link URL Error in original website (#15020)
     add 0ec4886  Expose get_all_registered_operators and get_operator_arguments in the… (#15364)
     add 612b9d1  Update Horovod docs links in README (#15366)
     add 74dbadb  [TUTORIAL] Revise Naming tutorial (#15365)
     add 728b8db  Revise Symbol tutorial (#15343)
     add dfe923a  Update fp16 docs: Block.cast is inplace (#15458)
     add bc570db  [OP] Add a new arange_like operator to contrib (#15400)
     add a6ed12f  Had a few PRs merged. Hope to become an official contributor and potentially a commiter. (#15451)
     add a3ae309  [MXNET-978] Higher Order Gradient Support `reciprocal`, `abs`. (#15413)
     add 091fece  fix fp32 flatten issue (#15351)
     add 1ae73de  fix doc for sort and argsort (#15317)
     add 09c71bf  Add Sparse NDArray support for Scala (#15378)
     add b62aaf3  fix comment (#15481)
     add e02c0a2  Upgrade MKL-DNN submodule to v0.20 release (#15422)
     add 6b2b927  [Perl] - simplify aliasing strategy (#15395)
     add d82c89a  Opperf: Support Python<3.6 (#15487)
     add 7d4d1bc  Two fixes for info_gan.md example Code (#15323)
     add 6191dd7  fix the bug on Scala Sparse (#15500)
     add 7a83883  Improve docs for AMP (#15455)
     add 68460a9  [Doc] Add MKL install method apt/yum into tutorial (#15491)
     add 1f3195f  Julia docs (#15454)
     add 5ffd598  CI: upgrade Julia version from 1.0.3 to 1.0.4 (#15502)
     add 554b196  Rebase #13757 to master (#15189)
     add 6a564be  cuda/cuDNN lib version checking.  Force cuDNN v7 usage. (#15449)
     add 9ff6c46  Add -R option to ci/build.py to avoid rebuilding containers (#15426)
     add 2565fa2  fix nightly CI failure (#15452)
     add 9c5acb4  Accelerate ROIPooling layer (#14894)
     add cbb6f7f  Docs: Fix misprints (#15505)
     add d677d1a  FP16 Support for C Predict API (#15245)
     add b25ec8e  Improve diagnose.py, adding build features info and binary library path. (#15499)
     add 38a44db  update ratcheck for apache-rat 0.13 release (#15417)
     add 6acf7e6  Small typo fixes in batch_norm-inl.h (#15527)
     add b88705e  fix heap-use-after-free in scala (#15503)
     add 9724ce6  Avoid memory copy for dropout inference (#15521)
     add 41ecf58  add julia env settings (#15523)
     add 1b725c3  [MKLDNN] Independent gradients requests check with respect to weights and bias of convolution (#15497)
     add 9777717  website build for julia: fix path to be static (#15554)
     add 300cb69  some minor typos/clarifications (#15538)
     add 4d07d78  broadcast axis is alias to broadcast axes; doc fix (#15546)
     add 00df447  [Do not review] [Do not merge] New numpy-compatible sum (#14739)
     add d6ead71  [numpy] Infra for supporting numpy ops in imperative mode and Gluon APIs (#14758)
     add d47f581  Enable np op compat check with name prefix (#14897)
     add cff3c9a  [numpy] Numpy dot (#14831)
     add 410201d  numpy-compatible mean (#14859)
     add 4d7415b  [numpy] Some np ops for d2l (#14924)
     add cd5364b  [numpy] Refactor np modules (#14989)
     add 5435c5c  [numpy] Refactor np module (example runs through) (#15055)
     add 2384192  Change np_compat to np_shape
     add 9548980  Temporarily disable test_amp
     add c648474  Numpy-compatible stack (#15027)
     add 8a29622  Numpy Unary Ops (#15010)
     add 4a27996  [numpy] Fix np branch after rebase (#15086)
     add d4cb272  numpy concatenate (#15104)
     add 8ce9032  [WIP][numpy] Fix for D2L Chapters 2/3/4 (#15139)
     add fbad391  [numpy] Fix d2l performance regression (#15173)
     add e9ad4ad  Fix (#15188)
     add a14fe95  fix for chapter6 conv nn (#15224)
     add fbeab3f  [numpy] Fix d2l chapter8 (#15237)
     add 6d9e335  fix for ch11 (#15244)
     add 2c68e00  Numpy-compatible split (#15049)
     add e2c724e  [numpy] [DO NOT MERGE] Fix d2l chapters 9 and 13 (#15246)
     add 268eb41  [numpy] Fix d2l chapter 5 (#15264)
     add 792d9d4  Numpy compatible max (#15161)
     add f741658  Numpy compatible multinomial (#15219)
     add 093cac6  Numpy compatible linspace (#15256)
     add 98c18b5  numpy-compatible cumsum (#15309)
     add fcea68b  [numpy] Misc fix for other chapters (#15332)
     add e3e9438  [numpy] Change d2l chapters cv and gan to use numpy (#15368)
     add ecb261b  add doc for multinomial, dot, cumsum, clip, abs, exp, arctan (#15386)
     add 44e8477  [numpy] Fix several places in numpy (#15398)
     add b847f62  [numpy] fix cython (#15418)
     add 77aaf12  [numpy][doc-fix] sum, copy, tile, argmax, sign, log, degrees (#15382)
     add 3410cae  [numpy][doc-fix] mean, transpose, stack, split, log2, rint and radians (#15370)
     add 236cfa6  [numpy][doc-fix] zeros_like, linspace, reciprocal, square, and arcsin (#15377)
     add ff368ae  Numpy Trace (#15258)
     add 435966a  [Numpy] Numpy compatible argsort (#15501)
     add b62ae57  fix memory override bug in multinomial (#15397)
     add 28bb0ca  numpy eye op (#15282)
     add 80476c3  [Numpy] Numpy hstack (#15302)
     add 255793a  Numpy Tensordot Operator  (#15349)
     new 7da1a11  Fix build failure

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs when a user
force-pushes a change (git push --force), leaving the branch history
looking something like this:

 * -- * -- B -- O -- O -- O   (d8d6b3b)
            \
             N -- N -- N   refs/heads/numpy (7da1a11)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.
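
For instance, one quick way to confirm that an "omit" revision is still
reachable is to ask a local clone which refs contain it. A minimal sketch,
assuming Python 3.7+ and a local checkout of incubator-mxnet; refs_containing
is an illustrative helper, not something shipped with the repository:

    import subprocess

    def refs_containing(commit, repo="."):
        """List the refs in `repo` that still contain `commit`."""
        out = subprocess.run(
            ["git", "-C", repo, "for-each-ref", "--contains", commit,
             "--format=%(refname)"],
            capture_output=True, text=True, check=True,
        )
        return out.stdout.split()

    # d8d6b3b is the old tip of the numpy branch from the first "omit" line above.
    print(refs_containing("d8d6b3b", repo="incubator-mxnet"))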

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .gitignore                                         |   5 +-
 3rdparty/mkldnn                                    |   2 +-
 CMakeLists.txt                                     |   2 +-
 CONTRIBUTORS.md                                    |   4 +-
 Makefile                                           |  32 +-
 NEWS.md                                            |   4 +-
 R-package/R/viz.graph.R                            |   4 +-
 amalgamation/Makefile                              |   4 +-
 amalgamation/python/mxnet_predict.py               | 131 +++-
 {tools/coreml/converter => benchmark}/__init__.py  |   0
 benchmark/opperf/README.md                         | 183 ++++++
 .../converter => benchmark/opperf}/__init__.py     |   0
 .../opperf/custom_operations}/__init__.py          |   0
 .../opperf/custom_operations/custom_operations.py  |  67 +++
 benchmark/opperf/nd_operations/README.md           | 143 +++++
 .../opperf/nd_operations}/__init__.py              |   0
 benchmark/opperf/nd_operations/binary_operators.py |  92 +++
 benchmark/opperf/nd_operations/gemm_operators.py   |  88 +++
 .../nd_operations/nn_activation_operators.py       | 107 ++++
 .../opperf/nd_operations/nn_basic_operators.py     |  83 +++
 .../opperf/nd_operations/nn_conv_operators.py      | 137 +++++
 .../nd_operations/random_sampling_operators.py     |  61 ++
 .../opperf/nd_operations/reduction_operators.py    |  58 ++
 benchmark/opperf/nd_operations/unary_operators.py  |  62 ++
 benchmark/opperf/opperf.py                         | 151 +++++
 .../mxnet_operator_benchmark_results_cpu.md        | 322 ++++++++++
 .../mxnet_operator_benchmark_results_gpu.md        | 322 ++++++++++
 .../opperf/rules}/__init__.py                      |   0
 benchmark/opperf/rules/default_params.py           |  88 +++
 .../opperf/utils}/__init__.py                      |   0
 benchmark/opperf/utils/benchmark_utils.py          | 132 ++++
 benchmark/opperf/utils/common_utils.py             | 130 ++++
 benchmark/opperf/utils/ndarray_utils.py            | 127 ++++
 benchmark/opperf/utils/op_registry_utils.py        | 276 +++++++++
 benchmark/opperf/utils/profiler_utils.py           | 189 ++++++
 ci/build.py                                        |  10 +-
 ci/docker/Dockerfile.build.centos7_gpu             |   2 +-
 ci/docker/Dockerfile.build.ubuntu_base_gpu         |   2 +-
 ci/docker/Dockerfile.build.ubuntu_build_cuda       |   4 +-
 ci/docker/Dockerfile.build.ubuntu_gpu_cu100        |   2 +-
 ci/docker/Dockerfile.build.ubuntu_gpu_cu90         |   2 +-
 ci/docker/Dockerfile.build.ubuntu_gpu_cu92         |   2 +-
 ci/docker/Dockerfile.build.ubuntu_nightly_gpu      |   2 +-
 .../install/{ubuntu_tutorials.sh => ubuntu_ar.sh}  |  17 +-
 ci/docker/install/ubuntu_julia.sh                  |   2 +-
 ci/docker/install/ubuntu_tutorials.sh              |   4 +-
 ci/docker/runtime_functions.sh                     | 101 ++--
 ci/jenkins/Jenkins_steps.groovy                    |  21 +-
 ci/jenkins/Jenkinsfile_website                     |   1 -
 ci/windows/test_py2_cpu.ps1                        |   6 +-
 ci/windows/test_py2_gpu.ps1                        |  10 +-
 ci/windows/test_py3_cpu.ps1                        |   6 +-
 ci/windows/test_py3_gpu.ps1                        |  10 +-
 contrib/clojure-package/README.md                  |   2 +-
 .../examples/bert/fine-tune-bert.ipynb             | 145 ++++-
 .../bert/src/bert/bert_sentence_classification.clj | 113 +++-
 .../bert/bert_sentence_classification_test.clj     |  24 +-
 .../examples/cnn-text-classification/README.md     |  22 +-
 .../cnn-text-classification/get_fasttext_data.sh   |  11 +-
 .../src/cnn_text_classification/data_helper.clj    |  37 +-
 .../predictor/src/infer/predictor_example.clj      |   6 +
 .../src/org/apache/clojure_mxnet/image.clj         | 116 +++-
 .../test/org/apache/clojure_mxnet/image_test.clj   |  63 +-
 cpp-package/example/get_data.sh                    |   9 +-
 cpp-package/example/inference/README.md            | 122 +++-
 .../example/inference/imagenet_inference.cpp       | 662 +++++++++++++++++++++
 .../example/inference/inception_inference.cpp      | 448 --------------
 .../inference/unit_test_imagenet_inference.sh      |  63 ++
 .../inference/unit_test_inception_inference.sh     |  42 --
 cpp-package/example/mlp_csv.cpp                    |   1 +
 cpp-package/include/mxnet-cpp/MxNetCpp.h           |   1 +
 cpp-package/include/mxnet-cpp/contrib.h            | 115 ++++
 cpp-package/include/mxnet-cpp/initializer.h        |  50 ++
 cpp-package/include/mxnet-cpp/io.h                 |   2 +
 cpp-package/include/mxnet-cpp/ndarray.h            |   6 +
 cpp-package/include/mxnet-cpp/ndarray.hpp          |  13 +-
 cpp-package/include/mxnet-cpp/symbol.h             |  17 +
 cpp-package/include/mxnet-cpp/symbol.hpp           |  35 ++
 cpp-package/tests/ci_test.sh                       |   4 +-
 docs/_static/mxnet-theme/index.html                |   2 +-
 .../tutorials/tensorrt/wavenet_optimized.png       | Bin
 .../tutorials/tensorrt/wavenet_unoptimized.png     | Bin
 docs/api/julia/index.md                            |  11 +-
 docs/api/python/gluon/gluon.md                     |   2 +-
 docs/api/python/optimization/optimization.md       |   3 +
 docs/community/contribute.md                       |   1 -
 docs/community/ecosystem.md                        |   2 +-
 docs/faq/env_var.md                                |  16 +-
 docs/faq/float16.md                                | 175 ++++--
 docs/faq/index.md                                  |   2 +-
 docs/install/build_from_source.md                  |  50 +-
 docs/install/c_plus_plus.md                        |   6 +-
 docs/install/index.md                              |  86 +--
 docs/install/install-jetson.md                     | 231 +++++++
 docs/install/java_setup.md                         |  18 +-
 docs/install/osx_setup.md                          |  14 +-
 docs/install/raspbian_setup.md                     |  25 -
 docs/install/scala_setup.md                        |  12 +-
 docs/install/tx2_setup.md                          |  25 -
 docs/install/ubuntu_setup.md                       | 118 +++-
 docs/install/windows_setup.md                      |  10 +-
 docs/mxdoc.py                                      |  15 +-
 docs/settings.ini                                  |  19 +-
 docs/tutorials/amp/amp_tutorial.md                 |  90 ++-
 docs/tutorials/basic/symbol.md                     | 370 ++++++------
 docs/tutorials/c++/basics.md                       |   4 +-
 docs/tutorials/embedded/wine_detector.md           |   2 +-
 docs/tutorials/gluon/data_augmentation.md          | 219 +++++--
 docs/tutorials/gluon/info_gan.md                   |   4 +-
 docs/tutorials/gluon/multi_gpu.md                  | 193 ++++++
 docs/tutorials/gluon/naming.md                     | 148 ++---
 docs/tutorials/gluon/performance.md                | 483 +++++++++++++++
 docs/tutorials/gluon/transforms.md                 | 173 ++++++
 docs/tutorials/index.md                            |  11 +-
 docs/tutorials/java/mxnet_java_on_intellij.md      |   4 +-
 docs/tutorials/java/ssd_inference.md               |  30 +-
 docs/tutorials/mkldnn/MKLDNN_README.md             |   4 +-
 docs/tutorials/mkldnn/operator_list.md             |   2 +
 docs/tutorials/python/kvstore.md                   |  11 +-
 docs/tutorials/python/profiler.md                  | 185 ++++--
 docs/tutorials/python/profiler_nvprof.png          | Bin 235747 -> 0 bytes
 docs/tutorials/python/profiler_nvprof_zoomed.png   | Bin 254663 -> 0 bytes
 docs/tutorials/python/profiler_winograd.png        | Bin 75450 -> 0 bytes
 docs/tutorials/sparse/train.md                     |  10 +-
 docs/tutorials/sparse/train_gluon.md               | 470 +++++++++++++++
 docs/tutorials/tensorrt/inference_with_trt.md      |   4 +-
 example/README.md                                  |   2 +-
 .../automatic-mixed-precision}/README.md           |  20 +-
 .../amp_model_conversion.py                        | 119 ++++
 .../common                                         |   0
 example/cnn_text_classification/README.md          |   4 +
 example/cnn_text_classification/data_helpers.py    |   3 +
 example/distributed_training-horovod/README.md     |   8 +-
 example/gan/CGAN_mnist_R/README.md                 |   2 +-
 example/gluon/embedding_learning/README.md         |   4 +
 .../gluon/embedding_learning/get_cub200_data.sh    |   6 +-
 example/gluon/style_transfer/README.md             |   4 +
 .../style_transfer/dataset/download_dataset.py     |   4 +
 example/gluon/super_resolution/README.md           |  11 +
 example/gluon/super_resolution/super_resolution.py |   4 +-
 example/image-classification/common/fit.py         |   4 +-
 example/image-classification/train_imagenet.py     |   4 +-
 .../matrix_factorization/get_data.py               |   4 +
 example/quantization/README.md                     |  18 +-
 example/recommenders/README.md                     |   3 +
 example/recommenders/movielens_data.py             |   4 +
 example/sparse/matrix_factorization/data.py        |   4 +
 include/mkldnn                                     |   1 -
 include/mkldnn/mkldnn.h                            |   1 +
 include/mkldnn/mkldnn_types.h                      |   1 +
 include/mxnet/c_api.h                              | 143 ++++-
 include/mxnet/c_predict_api.h                      |  65 ++
 include/mxnet/ndarray.h                            |   2 +-
 include/mxnet/resource.h                           |   8 +-
 mkldnn.mk                                          |   3 +-
 perl-package/AI-MXNet/lib/AI/MXNet.pm              | 116 +---
 perl-package/AI-MXNet/lib/AI/MXNet/AutoGrad.pm     |  19 +-
 perl-package/AI-MXNet/lib/AI/MXNet/Base.pm         |  16 +-
 perl-package/AI-MXNet/lib/AI/MXNet/Callback.pm     |   1 +
 perl-package/AI-MXNet/lib/AI/MXNet/Context.pm      |   8 +-
 perl-package/AI-MXNet/lib/AI/MXNet/Contrib.pm      |  10 +-
 .../AI-MXNet/lib/AI/MXNet/Contrib/NDArray.pm       |   1 +
 .../AI-MXNet/lib/AI/MXNet/Contrib/Symbol.pm        |   1 +
 perl-package/AI-MXNet/lib/AI/MXNet/CudaModule.pm   |   5 +-
 perl-package/AI-MXNet/lib/AI/MXNet/Engine.pm       |   3 +-
 perl-package/AI-MXNet/lib/AI/MXNet/Gluon.pm        |  40 +-
 perl-package/AI-MXNet/lib/AI/MXNet/Gluon/Block.pm  |   7 +-
 perl-package/AI-MXNet/lib/AI/MXNet/Gluon/Data.pm   |   4 +-
 .../AI-MXNet/lib/AI/MXNet/Gluon/Data/Vision.pm     |   3 +
 perl-package/AI-MXNet/lib/AI/MXNet/Gluon/Loss.pm   |   3 +-
 perl-package/AI-MXNet/lib/AI/MXNet/Gluon/NN.pm     |  21 +-
 .../AI-MXNet/lib/AI/MXNet/Gluon/NN/Activation.pm   |   2 +
 .../AI-MXNet/lib/AI/MXNet/Gluon/NN/BasicLayers.pm  |   4 +-
 .../AI-MXNet/lib/AI/MXNet/Gluon/NN/ConvLayers.pm   |   4 +-
 .../AI-MXNet/lib/AI/MXNet/Gluon/Parameter.pm       |   5 +
 perl-package/AI-MXNet/lib/AI/MXNet/Gluon/RNN.pm    |  21 +-
 .../AI-MXNet/lib/AI/MXNet/Gluon/Trainer.pm         |   3 +
 perl-package/AI-MXNet/lib/AI/MXNet/Gluon/Utils.pm  |   2 +-
 perl-package/AI-MXNet/lib/AI/MXNet/IO.pm           |   1 +
 perl-package/AI-MXNet/lib/AI/MXNet/Image.pm        |   1 +
 perl-package/AI-MXNet/lib/AI/MXNet/Initializer.pm  |   1 +
 perl-package/AI-MXNet/lib/AI/MXNet/KVStore.pm      |   1 +
 perl-package/AI-MXNet/lib/AI/MXNet/LinAlg.pm       |   1 +
 perl-package/AI-MXNet/lib/AI/MXNet/Metric.pm       |   1 +
 perl-package/AI-MXNet/lib/AI/MXNet/Module.pm       |   1 +
 perl-package/AI-MXNet/lib/AI/MXNet/Module/Base.pm  |   2 +-
 perl-package/AI-MXNet/lib/AI/MXNet/Monitor.pm      |   1 +
 perl-package/AI-MXNet/lib/AI/MXNet/NDArray.pm      |   1 +
 perl-package/AI-MXNet/lib/AI/MXNet/NS.pm           |  78 +++
 perl-package/AI-MXNet/lib/AI/MXNet/Optimizer.pm    |   1 +
 perl-package/AI-MXNet/lib/AI/MXNet/RNN.pm          |   1 +
 perl-package/AI-MXNet/lib/AI/MXNet/Random.pm       |   1 +
 perl-package/AI-MXNet/lib/AI/MXNet/RecordIO.pm     |   1 +
 perl-package/AI-MXNet/lib/AI/MXNet/Symbol.pm       |   1 +
 .../AI-MXNet/lib/AI/MXNet/Symbol/AttrScope.pm      |  12 +-
 .../AI-MXNet/lib/AI/MXNet/Symbol/NameManager.pm    |   6 +-
 perl-package/AI-MXNet/lib/AI/MXNet/TestUtils.pm    |   9 +-
 .../AI-MXNet/lib/AI/MXNet/Visualization.pm         |   1 +
 perl-package/AI-MXNet/t/AI-MXNet.t                 |   5 +-
 perl-package/AI-MXNet/t/test_multi_device_exec.t   |   4 +-
 python/mxnet/autograd.py                           |   3 +-
 python/mxnet/contrib/amp/amp.py                    | 353 ++++++++++-
 python/mxnet/contrib/amp/lists/symbol.py           |   2 +-
 .../mxnet/contrib/onnx/onnx2mx/_op_translations.py |   2 +-
 python/mxnet/contrib/onnx/onnx2mx/import_model.py  |   2 +-
 python/mxnet/contrib/text/embedding.py             |   2 +-
 python/mxnet/gluon/block.py                        |  30 +-
 python/mxnet/gluon/contrib/rnn/rnn_cell.py         |   3 +-
 python/mxnet/gluon/data/dataloader.py              |   8 +-
 python/mxnet/gluon/data/vision/transforms.py       |   2 +-
 python/mxnet/gluon/loss.py                         |   2 -
 python/mxnet/gluon/nn/basic_layers.py              |   4 +-
 python/mxnet/gluon/parameter.py                    | 100 +++-
 python/mxnet/gluon/utils.py                        |  52 +-
 python/mxnet/image/image.py                        |   4 +-
 python/mxnet/module/base_module.py                 |   2 +-
 python/mxnet/module/executor_group.py              |   8 +
 python/mxnet/module/python_module.py               |   2 +-
 python/mxnet/ndarray/ndarray.py                    |   8 +-
 python/mxnet/numpy_extension/__init__.py           |   5 +-
 python/mxnet/operator.py                           |  61 +-
 python/mxnet/profiler.py                           |  37 +-
 python/mxnet/test_utils.py                         |  92 ++-
 scala-package/README.md                            |  28 +-
 scala-package/core/pom.xml                         |   1 +
 .../src/main/scala/org/apache/mxnet/DType.scala    |  17 +-
 .../src/main/scala/org/apache/mxnet/Executor.scala |   9 +-
 .../src/main/scala/org/apache/mxnet/LibInfo.scala  |  27 +-
 .../src/main/scala/org/apache/mxnet/NDArray.scala  |  67 ++-
 .../main/scala/org/apache/mxnet/SparseFormat.scala |  25 +
 .../scala/org/apache/mxnet/SparseNDArray.scala     | 196 ++++++
 .../scala/org/apache/mxnet/module/BaseModule.scala |   2 +-
 .../test/scala/org/apache/mxnet/NDArraySuite.scala |  16 +
 .../org/apache/mxnet/SparseNDArraySuite.scala      |  93 +++
 scala-package/deploy/pom.xml                       |  50 +-
 .../javaapi/infer/objectdetector/README.md         |   2 +-
 .../javaapi/infer/predictor/README.md              |   2 +-
 .../mxnetexamples/infer/objectdetector/README.md   |   2 +-
 scala-package/externalPom/pom.xml                  | 152 +++++
 .../src/main/deploy/deploy.xml                     |   0
 scala-package/mxnet-demo/java-demo/README.md       |   3 +-
 .../java-demo/src/main/java/mxnet/HelloWorld.java  |  32 +
 .../main/native/org_apache_mxnet_native_c_api.cc   |  82 ++-
 .../main/native/org_apache_mxnet_native_c_api.h    |  48 +-
 scala-package/pom.xml                              |   2 +
 src/c_api/c_api.cc                                 |  47 +-
 src/c_api/c_api_profile.cc                         |  18 +-
 src/c_api/c_api_symbolic.cc                        | 204 +++++++
 src/c_api/c_predict_api.cc                         | 140 ++++-
 src/common/cuda_utils.cc                           | 105 ++++
 src/common/cuda_utils.h                            |  27 +
 src/common/exec_utils.h                            |   5 +-
 src/engine/naive_engine.cc                         |  24 +-
 src/engine/threaded_engine.cc                      |  10 +-
 src/engine/threaded_engine.h                       |   1 +
 src/executor/graph_executor.cc                     |   4 +-
 src/imperative/cached_op.cc                        |  15 +-
 src/imperative/cached_op.h                         |   1 +
 src/imperative/imperative.cc                       |   4 +
 src/imperative/imperative_utils.h                  |  11 +-
 src/io/image_aug_default.cc                        |   6 +-
 src/io/image_det_aug_default.cc                    |   2 +-
 src/io/iter_image_recordio_2.cc                    |  27 +-
 src/io/iter_mnist.cc                               |   2 +-
 src/kvstore/kvstore_dist.h                         |   4 +-
 src/ndarray/ndarray.cc                             |  15 +-
 src/nnvm/amp_infer_unknown.cc                      | 148 +++++
 src/nnvm/legacy_op_util.cc                         |  47 +-
 src/nnvm/low_precision_pass.cc                     | 261 ++++++++
 src/nnvm/plan_memory.cc                            |   2 +-
 src/operator/contrib/boolean_mask-inl.h            |   9 +-
 src/operator/contrib/boolean_mask.cc               |  20 +-
 src/operator/contrib/boolean_mask.cu               |   4 +-
 src/operator/contrib/multi_proposal-inl.h          |   5 +-
 src/operator/contrib/multibox_detection.cc         |   3 +-
 src/operator/contrib/proposal-inl.h                |   5 +-
 src/operator/custom/custom-inl.h                   |  23 +-
 src/operator/custom/custom.cc                      |   4 +-
 src/operator/image/image_random.cc                 |   2 +-
 src/operator/mkl_functions-inl.h                   |  15 +-
 src/operator/nn/batch_norm-inl.h                   |   5 +-
 src/operator/nn/dropout-inl.h                      |   2 +
 src/operator/nn/fully_connected-inl.h              |  14 +-
 src/operator/nn/fully_connected.cc                 |   2 -
 src/operator/nn/mkldnn/mkldnn_base-inl.h           |   2 +
 src/operator/nn/mkldnn/mkldnn_convolution.cc       |  15 +-
 src/operator/nn/mkldnn/mkldnn_flatten.cc           |  87 +++
 src/operator/nn/mkldnn/mkldnn_ops-inl.h            |   9 +-
 src/operator/nn/mkldnn/mkldnn_reshape-inl.h        |  68 +++
 src/operator/nn/mkldnn/mkldnn_reshape.cc           | 185 +++---
 src/operator/nn/pooling.cc                         |   4 +-
 src/operator/numpy/np_init_op.cc                   |   2 +-
 src/operator/numpy/np_init_op.cu                   |   2 +-
 src/operator/quantization/quantized_activation.cc  |  12 +-
 src/operator/quantization/quantized_concat.cc      |  14 +-
 src/operator/random/sample_multinomial_op.h        |  42 +-
 src/operator/rnn-inl.h                             |  18 +-
 src/operator/roi_pooling-inl.h                     |  11 +-
 src/operator/roi_pooling.cc                        | 128 +---
 src/operator/roi_pooling.cu                        | 127 +---
 src/operator/subgraph/tensorrt/nnvm_to_onnx-inl.h  |  12 +
 src/operator/subgraph/tensorrt/nnvm_to_onnx.cc     |  12 +
 src/operator/tensor/broadcast_reduce_op_value.cc   |   2 +
 src/operator/tensor/diag_op-inl.h                  |   2 +-
 src/operator/tensor/elemwise_binary_op_extended.cu |   2 +-
 src/operator/tensor/elemwise_unary_op_basic.cc     | 174 +++++-
 src/operator/tensor/elemwise_unary_op_trig.cc      |  60 +-
 src/operator/tensor/init_op.cc                     |  38 +-
 src/operator/tensor/init_op.cu                     |   5 +-
 src/operator/tensor/init_op.h                      |  66 +-
 src/operator/tensor/matrix_op.cc                   |  22 +-
 src/operator/tensor/ordering_op-inl.h              | 179 +++---
 src/operator/tensor/ordering_op.cc                 |   4 +-
 src/operator/tensor/sparse_retain.cc               |   6 +-
 src/profiler/aggregate_stats.cc                    | 253 ++++++--
 src/profiler/aggregate_stats.h                     |  21 +-
 src/profiler/custom_op_profiler.h                  | 125 ++++
 src/profiler/profiler.h                            |  68 ++-
 src/resource.cc                                    |  22 +-
 tests/cpp/engine/threaded_engine_test.cc           |  48 ++
 tests/cpp/operator/mkldnn_test.cc                  |   4 +-
 tests/nightly/JenkinsfileForBinaries               |   9 +-
 tests/nightly/estimator/test_estimator_cnn.py      |  32 +-
 tests/nightly/estimator/test_sentiment_rnn.py      |  84 +--
 tests/nightly/test_large_array.py                  |  26 +
 tests/python/gpu/test_contrib_amp.py               | 428 +++++++++++++
 tests/python/gpu/test_gluon_gpu.py                 |  58 +-
 tests/python/gpu/test_gluon_transforms.py          |  11 +-
 tests/python/gpu/test_operator_gpu.py              |  14 +
 tests/python/gpu/test_predictor.py                 | 128 ++++
 tests/python/mkl/test_mkldnn.py                    |  48 +-
 tests/python/mkl/test_subgraph.py                  |  29 +-
 tests/python/tensorrt/test_ops.py                  |  68 +++
 tests/python/tensorrt/test_tensorrt_lenet5.py      |   2 +-
 tests/python/unittest/test_autograd.py             |  21 +
 tests/python/unittest/test_contrib_amp.py          |  86 ---
 tests/python/unittest/test_dlpack.py               |  48 ++
 tests/python/unittest/test_gluon.py                |  71 ++-
 tests/python/unittest/test_gluon_data.py           |  25 +
 tests/python/unittest/test_gluon_data_vision.py    |   9 +
 tests/python/unittest/test_higher_order_grad.py    | 129 +++-
 tests/python/unittest/test_io.py                   |  12 +
 tests/python/unittest/test_ndarray.py              |   5 +-
 tests/python/unittest/test_numpy_gluon.py          |   7 +-
 tests/python/unittest/test_numpy_ndarray.py        |  24 +-
 tests/python/unittest/test_numpy_op.py             |  94 ++-
 tests/python/unittest/test_operator.py             | 170 +++++-
 tests/python/unittest/test_profiler.py             | 220 +++++++
 tests/python/unittest/test_sparse_ndarray.py       |   5 +
 tests/tutorials/test_tutorials.py                  |  12 +
 tools/diagnose.py                                  |  22 +-
 tools/license_header.py                            |   3 +
 tools/setup_gpu_build_tools.sh                     |  16 +-
 tools/staticbuild/README.md                        |   4 +-
 354 files changed, 13269 insertions(+), 2870 deletions(-)
 copy {tools/coreml/converter => benchmark}/__init__.py (100%)
 create mode 100644 benchmark/opperf/README.md
 copy {tools/coreml/converter => benchmark/opperf}/__init__.py (100%)
 copy {tools/coreml/converter => benchmark/opperf/custom_operations}/__init__.py (100%)
 create mode 100644 benchmark/opperf/custom_operations/custom_operations.py
 create mode 100644 benchmark/opperf/nd_operations/README.md
 copy {tools/coreml/converter => benchmark/opperf/nd_operations}/__init__.py (100%)
 create mode 100644 benchmark/opperf/nd_operations/binary_operators.py
 create mode 100644 benchmark/opperf/nd_operations/gemm_operators.py
 create mode 100644 benchmark/opperf/nd_operations/nn_activation_operators.py
 create mode 100644 benchmark/opperf/nd_operations/nn_basic_operators.py
 create mode 100644 benchmark/opperf/nd_operations/nn_conv_operators.py
 create mode 100644 benchmark/opperf/nd_operations/random_sampling_operators.py
 create mode 100644 benchmark/opperf/nd_operations/reduction_operators.py
 create mode 100644 benchmark/opperf/nd_operations/unary_operators.py
 create mode 100755 benchmark/opperf/opperf.py
 create mode 100644 benchmark/opperf/results/mxnet_operator_benchmark_results_cpu.md
 create mode 100644 benchmark/opperf/results/mxnet_operator_benchmark_results_gpu.md
 copy {tools/coreml/converter => benchmark/opperf/rules}/__init__.py (100%)
 create mode 100644 benchmark/opperf/rules/default_params.py
 copy {tools/coreml/converter => benchmark/opperf/utils}/__init__.py (100%)
 create mode 100644 benchmark/opperf/utils/benchmark_utils.py
 create mode 100644 benchmark/opperf/utils/common_utils.py
 create mode 100644 benchmark/opperf/utils/ndarray_utils.py
 create mode 100644 benchmark/opperf/utils/op_registry_utils.py
 create mode 100644 benchmark/opperf/utils/profiler_utils.py
 copy ci/docker/install/{ubuntu_tutorials.sh => ubuntu_ar.sh} (64%)
 copy perl-package/AI-MXNet/t/AI-MXNet.t => contrib/clojure-package/examples/cnn-text-classification/get_fasttext_data.sh (85%)
 mode change 100644 => 100755
 create mode 100644 cpp-package/example/inference/imagenet_inference.cpp
 delete mode 100644 cpp-package/example/inference/inception_inference.cpp
 create mode 100755 cpp-package/example/inference/unit_test_imagenet_inference.sh
 delete mode 100755 cpp-package/example/inference/unit_test_inception_inference.sh
 create mode 100644 cpp-package/include/mxnet-cpp/contrib.h
 rename docs/{ => _static}/tutorials/tensorrt/wavenet_optimized.png (100%)
 rename docs/{ => _static}/tutorials/tensorrt/wavenet_unoptimized.png (100%)
 create mode 100644 docs/install/install-jetson.md
 delete mode 100644 docs/install/raspbian_setup.md
 delete mode 100644 docs/install/tx2_setup.md
 create mode 100644 docs/tutorials/gluon/multi_gpu.md
 create mode 100644 docs/tutorials/gluon/performance.md
 create mode 100644 docs/tutorials/gluon/transforms.md
 delete mode 100644 docs/tutorials/python/profiler_nvprof.png
 delete mode 100644 docs/tutorials/python/profiler_nvprof_zoomed.png
 delete mode 100644 docs/tutorials/python/profiler_winograd.png
 create mode 100644 docs/tutorials/sparse/train_gluon.md
 rename {python/minpy => example/automatic-mixed-precision}/README.md (50%)
 create mode 100644 example/automatic-mixed-precision/amp_model_conversion.py
 copy example/{quantization => automatic-mixed-precision}/common (100%)
 delete mode 120000 include/mkldnn
 create mode 120000 include/mkldnn/mkldnn.h
 create mode 120000 include/mkldnn/mkldnn_types.h
 create mode 100644 perl-package/AI-MXNet/lib/AI/MXNet/NS.pm
 create mode 100644 scala-package/core/src/main/scala/org/apache/mxnet/SparseFormat.scala
 create mode 100644 scala-package/core/src/main/scala/org/apache/mxnet/SparseNDArray.scala
 create mode 100644 scala-package/core/src/test/scala/org/apache/mxnet/SparseNDArraySuite.scala
 create mode 100644 scala-package/externalPom/pom.xml
 rename scala-package/{deploy => externalPom}/src/main/deploy/deploy.xml (100%)
 create mode 100644 scala-package/mxnet-demo/java-demo/src/main/java/mxnet/HelloWorld.java
 create mode 100644 src/common/cuda_utils.cc
 create mode 100644 src/nnvm/amp_infer_unknown.cc
 create mode 100644 src/nnvm/low_precision_pass.cc
 create mode 100644 src/operator/nn/mkldnn/mkldnn_flatten.cc
 create mode 100644 src/operator/nn/mkldnn/mkldnn_reshape-inl.h
 create mode 100644 src/profiler/custom_op_profiler.h
 create mode 100644 tests/python/gpu/test_contrib_amp.py
 create mode 100644 tests/python/gpu/test_predictor.py
 create mode 100644 tests/python/tensorrt/test_ops.py
 delete mode 100644 tests/python/unittest/test_contrib_amp.py
 create mode 100644 tests/python/unittest/test_dlpack.py
 mode change 100644 => 100755 tools/diagnose.py


[incubator-mxnet] 01/01: Fix build failure

Posted by re...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

reminisce pushed a commit to branch numpy
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git

commit 7da1a11df8862bd6ec2edc3e35cd91202a3e07ee
Author: reminisce <wu...@gmail.com>
AuthorDate: Wed Jul 17 13:14:27 2019 +0800

    Fix build failure
---
 python/mxnet/gluon/loss.py                  |  2 -
 python/mxnet/gluon/nn/basic_layers.py       |  4 +-
 python/mxnet/gluon/utils.py                 | 52 +---------------
 python/mxnet/numpy_extension/__init__.py    |  5 +-
 python/mxnet/test_utils.py                  |  1 +
 src/operator/numpy/np_init_op.cc            |  2 +-
 src/operator/numpy/np_init_op.cu            |  2 +-
 tests/python/unittest/test_contrib_amp.py   | 86 --------------------------
 tests/python/unittest/test_numpy_gluon.py   |  7 ++-
 tests/python/unittest/test_numpy_ndarray.py | 24 ++++----
 tests/python/unittest/test_numpy_op.py      | 94 ++++++++++++++---------------
 11 files changed, 66 insertions(+), 213 deletions(-)
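
In brief, the diff below removes the temporary _adapt_np_array shim from the
Gluon layers and gluon/utils.py, trims the numpy_extension exports down to
is_np_shape, is_np_array, set_np and reset_np, re-exports use_np through
mxnet.test_utils, passes RangeParam as the second template argument to
RangeCompute in np_init_op.cc/cu, and switches the numpy tests from
@npx.use_np_shape / @npx.use_np_array to the single @use_np decorator. A
minimal sketch of the resulting test-side pattern, assuming a build of this
numpy branch; DemoSum is a hypothetical block, not one of the tests in the
diff:

    from mxnet import np
    from mxnet.gluon import HybridBlock
    from mxnet.test_utils import use_np  # re-exported from mxnet.util by this commit

    @use_np  # replaces the earlier per-test @npx.use_np_shape / @npx.use_np_array
    class DemoSum(HybridBlock):
        def hybrid_forward(self, F, x):
            return F.np.sum(x)

    block = DemoSum()
    block.initialize()
    print(block(np.ones((2, 3))))  # expected: array(6.)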

diff --git a/python/mxnet/gluon/loss.py b/python/mxnet/gluon/loss.py
index d634e79..d2e2344 100644
--- a/python/mxnet/gluon/loss.py
+++ b/python/mxnet/gluon/loss.py
@@ -29,7 +29,6 @@ import numpy as np
 from .. import ndarray
 from ..base import numeric_types
 from .block import HybridBlock
-from .utils import _adapt_np_array
 from ..util import is_np_array
 
 
@@ -188,7 +187,6 @@ class L1Loss(Loss):
     def __init__(self, weight=None, batch_axis=0, **kwargs):
         super(L1Loss, self).__init__(weight, batch_axis, **kwargs)
 
-    @_adapt_np_array
     def hybrid_forward(self, F, pred, label, sample_weight=None):
         label = _reshape_like(F, label, pred)
         loss = F.abs(label - pred)
diff --git a/python/mxnet/gluon/nn/basic_layers.py b/python/mxnet/gluon/nn/basic_layers.py
index 87d6e89..8596742 100644
--- a/python/mxnet/gluon/nn/basic_layers.py
+++ b/python/mxnet/gluon/nn/basic_layers.py
@@ -25,7 +25,7 @@ import numpy as np
 
 from .activations import Activation
 from ..block import Block, HybridBlock
-from ..utils import _indent, _adapt_np_array
+from ..utils import _indent
 from ... import nd, sym
 from ...util import is_np_array
 
@@ -521,7 +521,6 @@ class InstanceNorm(HybridBlock):
                                     shape=(in_channels,), init=beta_initializer,
                                     allow_deferred_init=True)
 
-    @_adapt_np_array
     def hybrid_forward(self, F, x, gamma, beta):
         if self._axis == 1:
             return F.InstanceNorm(x, gamma, beta,
@@ -706,7 +705,6 @@ class HybridLambda(HybridBlock):
                 "Unrecognized function in lambda: {} of type {}"
                 .format(function, type(function)))
 
-    @_adapt_np_array
     def hybrid_forward(self, F, x, *args):
         return self._func(F, x, *args)
 
diff --git a/python/mxnet/gluon/utils.py b/python/mxnet/gluon/utils.py
index 2822c70..b8e5b26 100644
--- a/python/mxnet/gluon/utils.py
+++ b/python/mxnet/gluon/utils.py
@@ -40,7 +40,7 @@ except ImportError:
 import numpy as np
 
 from .. import ndarray
-from ..util import is_np_shape, is_np_array, wraps_safely
+from ..util import is_np_shape, is_np_array
 from .. import numpy as _mx_np  # pylint: disable=reimported
 
 
@@ -484,53 +484,3 @@ def _check_all_np_ndarrays(out):
         for i in out:
             _check_all_np_ndarrays(i)
     # pylint: enable=no-else-raise
-
-
-def _to_classic_arrays(*args, **kwargs):
-    """Convert arrays to classic arrays. This is used in a Gluon layer for converting
-    inputs of np arrays to classic arrays so that the layer built with legacy ops can still
-    be used in np_array semantics."""
-    from ..numpy import ndarray as np_ndarray
-    from ..symbol.numpy import _Symbol as np_symbol
-    num_inputs = len(args)
-    assert num_inputs != 0
-    if not is_np_array():
-        return args, kwargs
-    in_arrs = [arr if arr is None else arr.as_nd_ndarray() for arr in args]
-    new_kwargs = {}
-    for k, v in kwargs.items():
-        if isinstance(v, (np_ndarray, np_symbol)):
-            new_kwargs[k] = v.as_nd_ndarray()
-        else:
-            new_kwargs[k] = v
-    return in_arrs, new_kwargs
-
-
-def _to_np_arrays(*args):
-    """Convert arrays to np arrays. This is used in a Gluon layer for converting
-    outputs of classic arrays to np arrays so that the layer built with legacy ops can still
-    be used in np_array semantics."""
-    num_outputs = len(args)
-    assert num_outputs != 0
-    if not is_np_array():
-        return args[0] if num_outputs == 1 else args
-    out = [arr.as_np_ndarray() for arr in args]
-    return out[0] if num_outputs == 1 else out
-
-
-# TODO(junwu): This is a temp solution for allowing basic layers
-# implemented using legacy ops to accept np.ndarrays as inputs and return
-# np.ndarrays as outputs. We should remove it after changing all the layers
-# to use np ops in np_array semantics in the future.
-def _adapt_np_array(func):
-    @wraps_safely(func)
-    def _with_np_array(*args, **kwargs):
-        assert len(args) > 2, "expect at least three arguments in args"
-        if is_np_array():
-            input_args, kwargs = _to_classic_arrays(*args[2:], **kwargs)
-            input_args = list(args[0:2]) + list(input_args)
-            out = func(*input_args, **kwargs)
-            return _to_np_arrays(out)
-        else:
-            return func(*args, **kwargs)
-    return _with_np_array
diff --git a/python/mxnet/numpy_extension/__init__.py b/python/mxnet/numpy_extension/__init__.py
index 6e89c00..4c26f59 100644
--- a/python/mxnet/numpy_extension/__init__.py
+++ b/python/mxnet/numpy_extension/__init__.py
@@ -25,10 +25,7 @@ from . import image
 from . import _register
 from ._op import *  # pylint: disable=wildcard-import
 from ..context import *  # pylint: disable=wildcard-import
-# TODO(junwu): revisit what functions should be exposed to users
-from ..util import use_np_shape, np_shape, is_np_shape
-from ..util import use_np_array, np_array, is_np_array
-from ..util import set_np, use_np, reset_np
+from ..util import is_np_shape, is_np_array, set_np, reset_np
 from ..ndarray import waitall
 from .utils import *  # pylint: disable=wildcard-import
 from .random import *  # pylint: disable=wildcard-import
diff --git a/python/mxnet/test_utils.py b/python/mxnet/test_utils.py
index 0dcb54b..7ecfd58 100644
--- a/python/mxnet/test_utils.py
+++ b/python/mxnet/test_utils.py
@@ -49,6 +49,7 @@ from .ndarray.ndarray import _STORAGE_TYPE_STR_TO_ID
 from .ndarray import array
 from .symbol import Symbol
 from .symbol.numpy import _Symbol as np_symbol
+from .util import use_np  # pylint: disable=unused-import
 
 
 def default_context():
diff --git a/src/operator/numpy/np_init_op.cc b/src/operator/numpy/np_init_op.cc
index dc262fe..fc1abe7 100644
--- a/src/operator/numpy/np_init_op.cc
+++ b/src/operator/numpy/np_init_op.cc
@@ -115,7 +115,7 @@ NNVM_REGISTER_OP(_npi_arange)
 .set_attr_parser(RangeParamParser)
 .set_attr<mxnet::FInferShape>("FInferShape", NumpyRangeShape)
 .set_attr<nnvm::FInferType>("FInferType", InitType<RangeParam>)
-.set_attr<FCompute>("FCompute<cpu>", RangeCompute<cpu>)
+.set_attr<FCompute>("FCompute<cpu>", RangeCompute<cpu, RangeParam>)
 .add_arguments(RangeParam::__FIELDS__());
 
 NNVM_REGISTER_OP(_npi_eye)
diff --git a/src/operator/numpy/np_init_op.cu b/src/operator/numpy/np_init_op.cu
index 68d1681..7f0d587 100644
--- a/src/operator/numpy/np_init_op.cu
+++ b/src/operator/numpy/np_init_op.cu
@@ -41,7 +41,7 @@ NNVM_REGISTER_OP(_np_ones_like)
 .set_attr<FCompute>("FCompute<gpu>", FillCompute<gpu, 1>);
 
 NNVM_REGISTER_OP(_npi_arange)
-.set_attr<FCompute>("FCompute<gpu>", RangeCompute<gpu>);
+.set_attr<FCompute>("FCompute<gpu>", RangeCompute<gpu, RangeParam>);
 
 NNVM_REGISTER_OP(_npi_eye)
 .set_attr<FCompute>("FCompute<gpu>", NumpyEyeFill<gpu>);
diff --git a/tests/python/unittest/test_contrib_amp.py b/tests/python/unittest/test_contrib_amp.py
deleted file mode 100644
index ef3a6d8..0000000
--- a/tests/python/unittest/test_contrib_amp.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-import mxnet as mx
-import warnings
-import collections
-import ctypes
-import mxnet.contrib.amp as amp
-
-
-def test_amp_coverage():
-    conditional = [item[0] for item in amp.lists.symbol.CONDITIONAL_FP32_FUNCS]
-
-    # Check for duplicates
-    for a in [amp.lists.symbol.FP16_FUNCS,
-          amp.lists.symbol.FP16_FP32_FUNCS,
-          amp.lists.symbol.FP32_FUNCS,
-          amp.lists.symbol.WIDEST_TYPE_CASTS,
-          conditional]:
-        ret = [item for item, count in collections.Counter(a).items() if count > 1]
-        assert ret == [], "Elements " + str(ret) + " are duplicated in the AMP lists."
-
-    t = []
-    for a in [amp.lists.symbol.FP16_FUNCS,
-              amp.lists.symbol.FP16_FP32_FUNCS,
-              amp.lists.symbol.FP32_FUNCS,
-              amp.lists.symbol.WIDEST_TYPE_CASTS,
-              conditional]:
-        t += a
-    ret = [item for item, count in collections.Counter(t).items() if count > 1]
-    assert ret == [], "Elements " + str(ret) + " exist in more than 1 AMP list."
-
-    # Check the coverage
-    py_str = lambda x: x.decode('utf-8')
-
-    plist = ctypes.POINTER(ctypes.c_char_p)()
-    size = ctypes.c_uint()
-
-    mx.base._LIB.MXListAllOpNames(ctypes.byref(size),
-                                     ctypes.byref(plist))
-    op_names = []
-    for i in range(size.value):
-        s = py_str(plist[i])
-        if not s.startswith("_backward") \
-           and not s.startswith("_contrib_backward_"):
-            op_names.append(s)
-
-    ret1 = set(op_names) - set(t)
-
-    if ret1 != set():
-        warnings.warn("Operators " + str(ret1) + " do not exist in AMP lists (in "
-                       "python/mxnet/contrib/amp/lists/symbol.py) - please add them. "
-                       """Please follow these guidelines for choosing a proper list:
-                       - if your operator is not to be used in a computational graph
-                         (e.g. image manipulation operators, optimizers) or does not have
-                         inputs, put it in FP16_FP32_FUNCS list,
-                       - if your operator requires FP32 inputs or is not safe to use with lower
-                         precision, put it in FP32_FUNCS list,
-                       - if your operator supports both FP32 and lower precision, has
-                         multiple inputs and expects all inputs to be of the same
-                         type, put it in WIDEST_TYPE_CASTS list,
-                       - if your operator supports both FP32 and lower precision and has
-                         either a single input or supports inputs of different type,
-                         put it in FP16_FP32_FUNCS list,
-                       - if your operator is both safe to use in lower precision and
-                         it is highly beneficial to use it in lower precision, then
-                         put it in FP16_FUNCS (this is unlikely for new operators)
-                       - If you are not sure which list to choose, FP32_FUNCS is the
-                         safest option""")
-
-if __name__ == '__main__':
-    test_amp_coverage()
diff --git a/tests/python/unittest/test_numpy_gluon.py b/tests/python/unittest/test_numpy_gluon.py
index b4db7bf..1821f8d 100644
--- a/tests/python/unittest/test_numpy_gluon.py
+++ b/tests/python/unittest/test_numpy_gluon.py
@@ -20,7 +20,8 @@ from __future__ import absolute_import
 from __future__ import division
 
 import mxnet as mx
-from mxnet import gluon, autograd, np, npx
+from mxnet import gluon, autograd, np
+from mxnet.test_utils import use_np
 
 
 def test_create_np_param():
@@ -45,7 +46,7 @@ def test_create_np_param():
         def hybrid_forward(self, F, x, w):
             return F.dot(x, w)
 
-    @npx.use_np
+    @use_np
     class TestBlock2(gluon.HybridBlock):
         def __init__(self):
             super(TestBlock2, self).__init__()
@@ -62,7 +63,7 @@ def test_create_np_param():
     check_block_params(x.as_np_ndarray(), TestBlock2, True, np.ndarray)
 
 
-@npx.use_np
+@use_np
 def test_optimizer_with_np_ndarrays():
     class LinearRegression(gluon.HybridBlock):
         def __init__(self, num_input_dim=0, num_hidden_dim=100, num_output_dim=10):
diff --git a/tests/python/unittest/test_numpy_ndarray.py b/tests/python/unittest/test_numpy_ndarray.py
index 887bb9a..080a662 100644
--- a/tests/python/unittest/test_numpy_ndarray.py
+++ b/tests/python/unittest/test_numpy_ndarray.py
@@ -23,12 +23,12 @@ import numpy as _np
 import mxnet as mx
 from mxnet import np, npx, autograd
 from mxnet.gluon import HybridBlock
-from mxnet.test_utils import same, assert_almost_equal, rand_shape_nd, rand_ndarray, retry, assert_exception
+from mxnet.test_utils import same, assert_almost_equal, rand_shape_nd, rand_ndarray, retry, assert_exception, use_np
 from common import with_seed, TemporaryDirectory
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_array_creation():
     dtypes = [_np.int8, _np.int32, _np.float16, _np.float32, _np.float64, None]
     objects = [
@@ -53,7 +53,7 @@ def test_array_creation():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_zeros():
     # test np.zeros in Gluon
     class TestZeros(HybridBlock):
@@ -101,7 +101,7 @@ def test_zeros():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_ones():
     # test np.ones in Gluon
     class TestOnes(HybridBlock):
@@ -167,7 +167,7 @@ def test_ndarray_binary_element_wise_ops():
     def get_np_ret(x1, x2, op):
         return np_op_map[op](x1, x2)
 
-    @npx.use_np_shape
+    @use_np
     class TestBinaryElementWiseOp(HybridBlock):
         def __init__(self, op, scalar=None, reverse=False):
             super(TestBinaryElementWiseOp, self).__init__()
@@ -235,7 +235,7 @@ def test_ndarray_binary_element_wise_ops():
                 print(self._op)
                 assert False
 
-    @npx.use_np_shape
+    @use_np
     def check_binary_op_result(shape1, shape2, op, dtype=None):
         if shape1 is None:
             mx_input1 = abs(_np.random.uniform()) + 1
@@ -305,7 +305,7 @@ def test_ndarray_binary_element_wise_ops():
 
 @with_seed()
 def test_hybrid_block_multiple_outputs():
-    @npx.use_np_shape
+    @use_np
     class TestAllNumpyOutputs(HybridBlock):
         def hybrid_forward(self, F, x, *args, **kwargs):
             return F.npx.relu(x), F.np.sum(x)
@@ -325,7 +325,7 @@ def test_hybrid_block_multiple_outputs():
             assert type(out1) is expected_out_type
             assert type(out2) is expected_out_type
 
-    @npx.use_np_array
+    @use_np
     class TestMixedTypeOutputsFailure(HybridBlock):
         def hybrid_forward(self, F, x, *args, **kwargs):
             return F.relu(x.as_nd_ndarray()), F.np.sum(x)
@@ -337,7 +337,7 @@ def test_hybrid_block_multiple_outputs():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_grad_ndarray_type():
     data = np.array(2, dtype=_np.float32)
     data.attach_grad()
@@ -375,7 +375,7 @@ def test_np_ndarray_copy():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_ndarray_indexing():
     def test_getitem(np_array, index):
         """`is_scalar` indicates whether we should expect a scalar for the result.
@@ -627,7 +627,7 @@ def test_np_ndarray_indexing():
 
 
 @with_seed()
-@npx.use_np
+@use_np
 def test_np_save_load_ndarrays():
     shapes = [(2, 0, 1), (0,), (), (), (0, 4), (), (3, 0, 0, 0), (2, 1), (0, 5, 0), (4, 5, 6), (0, 0, 0)]
     array_list = [_np.random.randint(0, 10, size=shape) for shape in shapes]
@@ -671,7 +671,7 @@ def test_np_save_load_ndarrays():
 
 @retry(5)
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_multinomial():
     pvals_list = [[0.0, 0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1, 0.0]]
     sizes = [None, (), (3,), (2, 5, 7), (4, 9)]
diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py
index cd323e2..8a89b91 100644
--- a/tests/python/unittest/test_numpy_op.py
+++ b/tests/python/unittest/test_numpy_op.py
@@ -24,20 +24,20 @@ from mxnet.base import MXNetError
 from mxnet.gluon import HybridBlock
 from mxnet.base import MXNetError
 from mxnet.test_utils import same, assert_almost_equal, rand_shape_nd, rand_ndarray
-from mxnet.test_utils import check_numeric_gradient
+from mxnet.test_utils import check_numeric_gradient, use_np
 from common import assertRaises, with_seed
 import random
 import collections
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_tensordot():
     class TestTensordot(HybridBlock):
         def __init__(self, axes):
             super(TestTensordot, self).__init__()
             self._axes = axes
-            
+
         def hybrid_forward(self, F, a, b):
             return F.np.tensordot(a, b, self._axes)
 
@@ -180,7 +180,7 @@ def test_np_tensordot():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_sum():
     class TestSum(HybridBlock):
         def __init__(self, axis=None, dtype=None, keepdims=False):
@@ -242,7 +242,7 @@ def test_np_sum():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_dot():
     shapes = [
         ((3, 0), (0, 4)),
@@ -290,9 +290,8 @@ def test_np_dot():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_mean():
-    @npx.use_np_shape
     class TestMean(HybridBlock):
         def __init__(self, axis=None, dtype=None, keepdims=False):
             super(TestMean, self).__init__()
@@ -355,9 +354,8 @@ def test_np_mean():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_max():
-    @npx.use_np_shape
     class TestMax(HybridBlock):
         def __init__(self, axis=None, keepdims=False):
             super(TestMax, self).__init__()
@@ -444,7 +442,7 @@ def test_np_max():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_transpose():
     # TODO(junwu): Add more test cases
     data = mx.sym.var('a').as_np_ndarray()
@@ -474,7 +472,7 @@ def test_np_transpose():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_npx_relu():
     # TODO(junwu): Add more test cases
     data = mx.sym.var('data').as_np_ndarray()
@@ -490,7 +488,7 @@ def test_npx_relu():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_npx_sigmoid():
     # TODO(junwu): Add more test cases
     data = mx.sym.var('data').as_np_ndarray()
@@ -506,7 +504,7 @@ def test_npx_sigmoid():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_reshape():
     # TODO(junwu): Add more test cases
     data = mx.sym.var('a').as_np_ndarray()
@@ -524,7 +522,7 @@ def test_np_reshape():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_maximum():
     # TODO(junwu): Add more test cases
     x1, x2 = mx.sym.var('x1').as_np_ndarray(), mx.sym.var('x2').as_np_ndarray()
@@ -545,7 +543,7 @@ def test_np_maximum():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_minimum():
     # TODO(junwu): Add more test cases
     x1, x2 = mx.sym.var('x1').as_np_ndarray(), mx.sym.var('x2').as_np_ndarray()
@@ -566,10 +564,9 @@ def test_np_minimum():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_unary_funcs():
     def check_unary_func(func, ref_grad, shape, low, high):
-        @npx.use_np_shape
         class TestUnary(HybridBlock):
             def __init__(self, func):
                 super(TestUnary, self).__init__()
@@ -641,9 +638,8 @@ def test_np_unary_funcs():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_stack():
-    @npx.use_np_shape
     class TestStack(HybridBlock):
         def __init__(self, axis=None):
             super(TestStack, self).__init__()
@@ -694,7 +690,7 @@ def test_np_stack():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_random():
     shapes = [(), (1,), (2, 3), (4, 0, 5), 6, (7, 8), None]
     dtypes = ['float16', 'float32', 'float64']
@@ -710,7 +706,6 @@ def test_np_random():
                     expected_shape = () if shape is None else (shape,)
                 assert out.shape == expected_shape
 
-    @npx.use_np
     class TestRandom(HybridBlock):
         def __init__(self, shape, op_name):
             super(TestRandom, self).__init__()
@@ -737,7 +732,7 @@ def test_np_random():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_arange():
     configs = [
         (1, 10, 2),
@@ -772,7 +767,6 @@ def test_np_arange():
                 np_ret = _np.arange(config, dtype=dtype)
             assert same(mx_ret.asnumpy(), np_ret)
 
-    @npx.use_np
     class TestRange(HybridBlock):
         def __init__(self, start, stop=None, step=None, dtype=None):
             super(TestRange, self).__init__()
@@ -801,7 +795,7 @@ def test_np_arange():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_linspace():
     configs = [
         (0.0, 1.0, 10),
@@ -835,7 +829,7 @@ def test_np_linspace():
     # check linspace equivalent to arange
     for test_index in range(1000):
         assert_almost_equal(mx.np.linspace(0, test_index, test_index + 1).asnumpy(), mx.np.arange(test_index + 1).asnumpy())
-    @npx.use_np
+    @use_np
     class TestLinspace(HybridBlock):
         def __init__(self, start, stop, num=50, endpoint=None, retstep=False, dtype=None, axis=0):
             super(TestLinspace, self).__init__()
@@ -871,7 +865,7 @@ def test_np_linspace():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_eye():
     configs = [
         4,
@@ -910,7 +904,7 @@ def test_np_eye():
             assertRaises(MXNetError, np.eye, *config)
         else:
             assertRaises(MXNetError, np.eye, config)
-    @npx.use_np
+    @use_np
     class TestEye(HybridBlock):
         def __init__(self, N, M=None, k=0, dtype=None):
             super(TestEye, self).__init__()
@@ -939,7 +933,7 @@ def test_np_eye():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_argmax():
     workloads = [
         ((), 0, False),
@@ -956,7 +950,7 @@ def test_np_argmax():
     ]
     dtypes = ['float16', 'float32', 'float64']
 
-    @npx.use_np
+    @use_np
     class TestArgMax(HybridBlock):
         def __init__(self, axis=None):
             super(TestArgMax, self).__init__()
@@ -1001,9 +995,9 @@ def test_np_argmax():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_argsort():
-    @npx.use_np_shape
+    @use_np
     class TestArgsort(HybridBlock):
         def __init__(self, axis=-1):
             super(TestArgsort, self).__init__()
@@ -1042,9 +1036,9 @@ def test_np_argsort():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_linalg_norm():
-    @npx.use_np
+    @use_np
     class TestLinalgNorm(HybridBlock):
         def __init__(self, ord=None, axis=None, keepdims=False):
             super(TestLinalgNorm, self).__init__()
@@ -1073,7 +1067,7 @@ def test_np_linalg_norm():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_concat():
     class TestConcat(HybridBlock):
         def __init__(self, axis=None):
@@ -1124,12 +1118,12 @@ def test_np_concat():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_hstack():
     class TestHStack(HybridBlock):
         def __init__(self):
             super(TestHStack, self).__init__()
-        
+
         def hybrid_forward(self, F, a, *args):
             return F.np.hstack([a] + list(args))
 
@@ -1189,7 +1183,7 @@ def test_np_hstack():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_swapaxes():
     config = [((0, 1, 2), 0, 1),
               ((0, 1, 2), -1, -2),
@@ -1221,7 +1215,7 @@ def test_np_swapaxes():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_squeeze():
     config = [((), None),
               ((), -1),
@@ -1255,7 +1249,7 @@ def test_np_squeeze():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_split():
     class TestSplit(HybridBlock):
         def __init__(self, indices_or_sections, axis=None):
@@ -1308,12 +1302,12 @@ def test_np_split():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_cumsum():
     def np_cumsum_backward(ograd, axis=None, dtype=None):
         return _np.flip(_np.cumsum(_np.flip(ograd, axis=axis), axis=axis, dtype=dtype), axis=axis)
 
-    @npx.use_np_shape
+    @use_np
     class TestCumsum(HybridBlock):
         def __init__(self, axis=None, dtype=None):
             super(TestCumsum, self).__init__()
@@ -1350,7 +1344,7 @@ def test_np_cumsum():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_tile():
     config = [
         ((), ()),
@@ -1391,7 +1385,7 @@ def test_np_tile():
 
 
 @with_seed()
-@npx.use_np_shape
+@use_np
 def test_np_prod():
     class TestProd(HybridBlock):
         def __init__(self, axis=None, dtype=None, keepdims=False):
@@ -1443,7 +1437,7 @@ def test_np_prod():
 
 
 @with_seed()
-@npx.use_np
+@use_np
 def test_np_flatten():
     # TODO(junwu): Add more test cases
     shapes = [(), (2, 0, 1), (3, 4, 5), 6]
@@ -1456,7 +1450,7 @@ def test_np_flatten():
 
 
 @with_seed()
-@npx.use_np
+@use_np
 def test_np_broadcast_to():
     # TODO(junwu): Add more test cases and backward test
     shapes = [(1, 2, 3, 4, 5), (1, 0, 3, 4, 5)]
@@ -1469,7 +1463,7 @@ def test_np_broadcast_to():
 
 
 @with_seed()
-@npx.use_np
+@use_np
 def test_np_meshgrid():
     nx, ny = (4, 5)
     x = np.linspace(0, 1, nx)
@@ -1484,14 +1478,14 @@ def test_np_meshgrid():
 
 
 @with_seed()
-@npx.use_np
+@use_np
 def test_np_broadcast_arrays():
     # TODO(junwu): Add test
     pass
 
 
 @with_seed()
-@npx.use_np
+@use_np
 def test_np_trace():
     class TestTrace(HybridBlock):
         def __init__(self, axis1, axis2, offset):
@@ -1499,10 +1493,10 @@ def test_np_trace():
             self._axis1 = axis1
             self._axis2 = axis2
             self._offset = offset
-          
+
         def hybrid_forward(self, F, data):
             return F.np.trace(data, axis1=self._axis1, axis2=self._axis2, offset=self._offset)
-    
+
     def g(data, axis1, axis2, offset):
         idx = _np.indices(data.shape)
         ret = _np.zeros_like(data)