Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2017/12/12 22:42:24 UTC

[GitHub] piiswrong closed pull request #8689: Mark tests that should only be run nightly.

URL: https://github.com/apache/incubator-mxnet/pull/8689

This is a PR merged from a forked repository. As GitHub hides the
original diff of a foreign pull request once it is merged, the diff is
reproduced below for the sake of provenance:
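
In summary, the change does three things: it tags long-running tests
with @attr('nightly') and one known-crashing test with @attr('crashing')
(tracked at https://github.com/apache/incubator-mxnet/issues/8564); it
excludes both groups from the per-commit Jenkins runs through nose's
attribute filter (-a '!nightly,!crashing'); and it narrows the CUDA
architectures compiled in CI by exporting a CUDA_ARCH variable in
tests/ci_build/ci_build.sh, while also silencing storage-fallback log
output via MXNET_STORAGE_FALLBACK_LOG_VERBOSE=0. The many
whitespace-only hunks simply add the PEP 8 two-blank-line separation
between top-level functions.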

diff --git a/Jenkinsfile b/Jenkinsfile
index cbe63758ac..141c76a84b 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -86,7 +86,7 @@ echo ${libs} | sed -e 's/,/ /g' | xargs md5sum
 def python2_ut(docker_type) {
   timeout(time: max_time, unit: 'MINUTES') {
     sh "${docker_run} ${docker_type} find . -name '*.pyc' -type f -delete"
-    sh "${docker_run} ${docker_type} PYTHONPATH=./python/ nosetests-2.7 --with-timer --verbose tests/python/unittest"
+    sh "${docker_run} ${docker_type} PYTHONPATH=./python/ nosetests-2.7 --with-timer --verbose -a '!nightly,!crashing' tests/python/unittest"
     sh "${docker_run} ${docker_type} PYTHONPATH=./python/ nosetests-2.7 --with-timer --verbose tests/python/train"
   }
 }
@@ -95,7 +95,7 @@ def python2_ut(docker_type) {
 def python3_ut(docker_type) {
   timeout(time: max_time, unit: 'MINUTES') {
     sh "${docker_run} ${docker_type} find . -name '*.pyc' -type f -delete"
-    sh "${docker_run} ${docker_type} PYTHONPATH=./python/ nosetests-3.4 --with-timer --verbose tests/python/unittest"
+    sh "${docker_run} ${docker_type} PYTHONPATH=./python/ nosetests-3.4 --with-timer --verbose -a '!nightly,!crashing' tests/python/unittest"
   }
 }
 
@@ -105,7 +105,7 @@ def python3_ut(docker_type) {
 def python2_gpu_ut(docker_type) {
   timeout(time: max_time, unit: 'MINUTES') {
     sh "${docker_run} ${docker_type} find . -name '*.pyc' -type f -delete"
-    sh "${docker_run} ${docker_type} PYTHONPATH=./python/ nosetests-2.7 --with-timer --verbose tests/python/gpu"
+    sh "${docker_run} ${docker_type} PYTHONPATH=./python/ nosetests-2.7 --with-timer --verbose -a '!nightly,!crashing' tests/python/gpu"
   }
 }
 
@@ -113,7 +113,7 @@ def python2_gpu_ut(docker_type) {
 def python3_gpu_ut(docker_type) {
   timeout(time: max_time, unit: 'MINUTES') {
     sh "${docker_run} ${docker_type} find . -name '*.pyc' -type f -delete"
-    sh "${docker_run} ${docker_type} PYTHONPATH=./python/ nosetests-3.4 --with-timer --verbose tests/python/gpu"
+    sh "${docker_run} ${docker_type} PYTHONPATH=./python/ nosetests-3.4 --with-timer --verbose -a '!nightly,!crashing' tests/python/gpu"
   }
 }
 
diff --git a/tests/ci_build/ci_build.sh b/tests/ci_build/ci_build.sh
index 79fcd86a5d..f89c555ba6 100755
--- a/tests/ci_build/ci_build.sh
+++ b/tests/ci_build/ci_build.sh
@@ -139,9 +139,12 @@ echo "Running '${COMMAND[@]}' inside ${DOCKER_IMG_NAME}..."
 # By default we cleanup - remove the container once it finish running (--rm)
 # and share the PID namespace (--pid=host) so the process inside does not have
 # pid 1 and SIGKILL is propagated to the process inside (jenkins can kill it).
+# We're passing the cuda archs specifically for the SMs needed by the CI.
 ${DOCKER_BINARY} run --rm --pid=host \
     -v ${WORKSPACE}:/workspace \
     -w /workspace \
+    -e "CUDA_ARCH=-gencode arch=compute_30,code=sm_30 -gencode arch=compute_52,code=[sm_52,compute_52]" \
+    -e "MXNET_STORAGE_FALLBACK_LOG_VERBOSE=0" \
     -e "CI_BUILD_HOME=${WORKSPACE}" \
     -e "CI_BUILD_USER=$(id -u -n)" \
     -e "CI_BUILD_UID=$(id -u)" \
diff --git a/tests/python/gpu/test_forward.py b/tests/python/gpu/test_forward.py
index cddf9afb9c..2870c400b8 100644
--- a/tests/python/gpu/test_forward.py
+++ b/tests/python/gpu/test_forward.py
@@ -19,12 +19,15 @@
 import numpy as np
 import mxnet as mx
 from mxnet.test_utils import *
+from nose.plugins.attrib import attr
+
 
 def _get_model():
     if not os.path.exists('model/Inception-7-symbol.json'):
         download('http://data.mxnet.io/models/imagenet/inception-v3.tar.gz', dirname='model')
         os.system("cd model; tar -xf inception-v3.tar.gz --strip-components 1")
 
+
 def _dump_images(shape):
     import skimage.io
     import skimage.transform
@@ -40,10 +43,13 @@ def _dump_images(shape):
     imgs = np.asarray(img_list, dtype=np.float32).transpose((0, 3, 1, 2)) - 128
     np.save('data/test_images_%d_%d.npy'%shape, imgs)
 
+
 def _get_data(shape):
     download("http://data.mxnet.io/data/test_images_%d_%d.npy" % (shape), dirname='data')
     download("http://data.mxnet.io/data/inception-v3-dump.npz", dirname="data")
 
+
+@attr('nightly')
 def test_consistency(dump=False):
     shape = (299, 299)
     _get_model()
@@ -64,5 +70,6 @@ def test_consistency(dump=False):
     if dump:
         np.savez('data/inception-v3-dump.npz', **{n: a.asnumpy() for n, a in gt.items()})
 
+
 if __name__ == '__main__':
     test_consistency(False)
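
The @attr('nightly') marker above is what the new Jenkinsfile filter
keys on: nose's attrib plugin deselects any test carrying an attribute
named in -a '!nightly,!crashing' (the comma means both conditions must
hold), and a separate nightly job can invert the filter with
-a 'nightly' to run only the tagged tests. A minimal, self-contained
sketch of the mechanism follows; the test names are hypothetical, not
taken from the MXNet suite:

    # Minimal sketch of nose attribute tagging.
    from nose.plugins.attrib import attr

    @attr('nightly')
    def test_downloads_full_dataset():
        # Skipped by:   nosetests -a '!nightly,!crashing' ...
        # Selected by:  nosetests -a 'nightly' ...
        pass

    def test_fast_path():
        # Untagged: runs in every per-commit CI pass.
        pass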
diff --git a/tests/python/gpu/test_operator_gpu.py b/tests/python/gpu/test_operator_gpu.py
index 13b547eb47..b651023f75 100644
--- a/tests/python/gpu/test_operator_gpu.py
+++ b/tests/python/gpu/test_operator_gpu.py
@@ -83,6 +83,7 @@ def check_countsketch(in_dim,out_dim,n):
                 a[j,i] = out_grad.asnumpy()[j, h[0,i]] * s[0,i]
     assert_almost_equal(a,arr_grad[0].asnumpy(),rtol=1e-3, atol=1e-12)
 
+
 def test_countsketch():
     np.random.seed(0)
     nrepeat = 2
@@ -97,6 +98,7 @@ def test_countsketch():
         n = np.random.randint(1,maxn)
         check_countsketch(in_dim, out_dim, n)
 
+
 def check_ifft(shape):
     shape_old = shape
     if len(shape) == 2:
@@ -178,6 +180,7 @@ def test_ifft():
             shape = tuple(np.random.randint(1, maxdim, size=order))
             check_ifft(shape)
 
+
 def check_fft(shape):
     sym = mx.sym.contrib.fft(name='fft', compute_size = 128)
     if len(shape) == 2:
@@ -256,6 +259,7 @@ def check_fft(shape):
         a = np.fft.ifft(out_grad_complex, n=None, axis=-1, norm=None)
         assert_almost_equal(a.real, exe.grad_arrays[0].asnumpy()/shape[3],rtol=1e-3, atol=1e-6)
 
+
 def test_fft():
     np.random.seed(0)
     nrepeat = 2
@@ -265,6 +269,7 @@ def test_fft():
             shape = tuple(np.random.randint(1, maxdim, size=order))
             check_fft(shape)
 
+
 def test_batchnorm_with_type():
   ctx_list_v1_2D = [
     {'ctx': mx.cpu(0), 'norm_data': (10, 2, 10, 10), 'type_dict': {'norm_data': np.float32}},
@@ -373,6 +378,7 @@ def test_batchnorm_versions_helper(batchnorm_op_list, data, fix_gamma, use_globa
 
     check_consistency(sym_list, ctx_list)
 
+
   def test_1d_batchnorm(fix_gamma, use_global_stats):
     data = (2, 3, 20)
     test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
@@ -380,6 +386,7 @@ def test_1d_batchnorm(fix_gamma, use_global_stats):
                                    data=data,
                                    fix_gamma=fix_gamma, use_global_stats=use_global_stats)
 
+
   def test_2d_batchnorm(fix_gamma, use_global_stats):
     data = (2, 3, 10, 10)
     test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_v1_cpu', 'batchnorm_v1_gpu',
@@ -388,6 +395,7 @@ def test_2d_batchnorm(fix_gamma, use_global_stats):
                                    data=data,
                                    fix_gamma=fix_gamma, use_global_stats=use_global_stats)
 
+
   def test_3d_batchnorm(fix_gamma, use_global_stats):
     data = (2, 3, 3, 5, 5)
     test_batchnorm_versions_helper(batchnorm_op_list=['batchnorm_cpu',
@@ -445,12 +453,14 @@ def test_convolution_with_type():
     # test ability to turn off training on bias
     check_consistency(sym, ctx_list, grad_req={'conv_data': 'write', 'conv_weight': 'write', 'conv_bias': 'null'}, tol=tol)
 
+
 # Apply N symbols against each of M contexts, checking that all NxM combinations match.
 def check_consistency_NxM(sym_list, ctx_list):
     # e.g. if sym_list=[sym1, sym2] and ctx_list=[ctx1, ctx2, ctx3], then resulting lists are:
     # sym_list=[sym1, sym1, sym1, sym2, sym2, sym2] and ctx_list=[ctx1, ctx2, ctx3, ctx1, ctx2, ctx3]
     check_consistency(np.repeat(sym_list, len(ctx_list)), ctx_list * len(sym_list))
 
+
 def test_convolution_options():
     # 1D convolution
     ctx_list = [{'ctx': mx.gpu(0), 'conv_data': (2, 2, 7), 'type_dict': {'conv_data': np.float64}},
@@ -516,6 +526,7 @@ def test_convolution_options():
     sym_no_cudnn = mx.sym.Convolution(num_filter=3, kernel=(1,1,1), pad=(0,0,0), cudnn_off=True, name='conv')
     check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
 
+
 def test_convolution_versions():
     # 2D convolution NCHW
     ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 7, 7), 'type_dict': {'conv_data': np.float32}},
@@ -541,6 +552,7 @@ def test_convolution_versions():
     syms = [conv_cudnn, conv_cpu, conv_gpu]
     check_consistency(syms, ctx_list)
 
+
 def test_pooling_with_type():
     ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float64}},
                 {'ctx': mx.gpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict': {'pool_data': np.float32}},
@@ -556,6 +568,7 @@ def test_pooling_with_type():
     sym = mx.sym.Pooling(kernel=(300,300), pool_type='max', global_pool=True, name='pool')
     check_consistency(sym, ctx_list)
 
+
 def test_deconvolution_with_type():
     sym = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), name='deconv')
     ctx_list = [{'ctx': mx.gpu(0), 'deconv_data': (2, 2, 10, 10), 'type_dict': {'deconv_data': np.float64}},
@@ -572,6 +585,7 @@ def test_deconvolution_with_type():
     check_consistency(sym, ctx_list, tol=tol)
     check_consistency(sym, ctx_list, tol=tol, grad_req="add")
 
+
 def test_deconvolution_options():
 
 #    # 1D convolution  (not yet enabled)
@@ -612,6 +626,7 @@ def test_deconvolution_options():
     sym_no_cudnn = mx.sym.Deconvolution(num_filter=2, kernel=(3,3), dilate=(2,2), cudnn_off=True, name='deconv')
     check_consistency_NxM([sym, sym_no_cudnn], ctx_list)
 
+
 #    # 3D convolution (not yet enabled)
 #    ctx_list = [{'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
 #                {'ctx': mx.cpu(0), 'conv_data': (2, 2, 5, 7, 7), 'type_dict': {'conv_data': np.float64}},
@@ -644,6 +659,7 @@ def test_bilinear_sampler_with_type():
     check_consistency(sym, ctx_list)
     check_consistency(sym, ctx_list, grad_req="add")
 
+
 def test_grid_generator_with_type():
     data = mx.sym.Variable('data')
     sym = mx.sym.GridGenerator(data=data, transform_type='affine', target_shape=(20, 20))
@@ -657,6 +673,7 @@ def test_grid_generator_with_type():
     check_consistency(sym, ctx_list)
     check_consistency(sym, ctx_list, grad_req="add")
 
+
 @unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/7645")
 def test_spatial_transformer_with_type():
     np.random.seed(1234)
@@ -672,6 +689,7 @@ def test_spatial_transformer_with_type():
     check_consistency(sym, ctx_list)
     check_consistency(sym, ctx_list, grad_req="add")
 
+
 # Checking max pooling consistency over the data sets of different float types is problematic
 # as one max value in a float32 data set may not be the max value in a float16 data set.
 # This function will not be called.
@@ -746,6 +764,7 @@ def test_pooling_versions_helper(pool_op_list, data, kernel, pool_type, pad, str
                                                name='pool'))
         check_consistency(sym_list, ctx_list)
 
+
     def test_1d_pooling(pool_type):
         data = (2, 3, 20)
         kernel = (4,)
@@ -777,6 +796,7 @@ def test_1d_pooling(pool_type):
                                      data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
                                      global_pool=True)
 
+
     def test_2d_pooling(pool_type):
         data = (2, 3, 20, 20)
         kernel = (4, 5)
@@ -1140,6 +1160,7 @@ def test_bidirectional():
     check_rnn_consistency(fused, stack)
     check_rnn_consistency(stack, fused)
 
+
 def test_unfuse():
     for mode in ['rnn_tanh', 'rnn_relu', 'lstm', 'gru']:
         fused = mx.rnn.FusedRNNCell(
@@ -1153,6 +1174,7 @@ def test_unfuse():
         check_rnn_consistency(fused, stack)
         check_rnn_consistency(stack, fused)
 
+
 def test_psroipooling_with_type():
     np.random.seed(1234)
     arg_params = {
@@ -1210,6 +1232,7 @@ def test_deformable_psroipooling_with_type():
                                                'deformable_psroipool_rois': 'null',
                                                'deformable_psroipool_trans': 'write'}, arg_params=arg_params)
 
+
 def test_deformable_convolution_with_type():
     np.random.seed(1234)
     sym = mx.sym.contrib.DeformableConvolution(num_filter=3, kernel=(3,3), name='deformable_conv')
@@ -1314,6 +1337,9 @@ def test_deformable_convolution_options():
     sym = mx.sym.contrib.DeformableConvolution(num_filter=4, kernel=(3,3), num_deformable_group=2,
                                                name='deformable_conv')
 
+
+# Test crashing, see: https://github.com/apache/incubator-mxnet/issues/8564
+@attr('crashing')
 def test_residual_fused():
     cell = mx.rnn.ResidualCell(
             mx.rnn.FusedRNNCell(50, num_layers=3, mode='lstm',
@@ -1333,6 +1359,7 @@ def test_residual_fused():
     expected_outputs = np.ones((10, 2, 50))+5
     assert np.array_equal(outputs[0].asnumpy(), expected_outputs)
 
+
 def check_rnn_layer(layer):
     layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
     with mx.gpu(0):
@@ -1362,6 +1389,7 @@ def test_rnn_layer():
 def test_sequence_reverse():
     check_sequence_reverse(mx.gpu(0))
 
+
 @unittest.skip("Test fails intermittently. Temporarily disabled until fixed. Tracked at https://github.com/apache/incubator-mxnet/issues/8211")
 def test_autograd_save_memory():
     x = mx.nd.zeros((128, 512, 512), ctx=mx.gpu(0))
@@ -1373,6 +1401,7 @@ def test_autograd_save_memory():
             x.wait_to_read()
     x.backward()
 
+
 def test_gluon_ctc_consistency():
     loss = mx.gluon.loss.CTCLoss()
     data = mx.nd.arange(0, 4, repeat=40, ctx=mx.gpu(0)).reshape((2,20,4)).flip(axis=0)
diff --git a/tests/python/unittest/test_gluon_data.py b/tests/python/unittest/test_gluon_data.py
index 63c5d28b7c..732cbf0f89 100644
--- a/tests/python/unittest/test_gluon_data.py
+++ b/tests/python/unittest/test_gluon_data.py
@@ -21,6 +21,8 @@
 import mxnet as mx
 import numpy as np
 from mxnet import gluon
+from nose.plugins.attrib import attr
+
 
 def test_array_dataset():
     X = np.random.uniform(size=(10, 20))
@@ -63,6 +65,7 @@ def test_recordimage_dataset():
         assert x.shape[0] == 1 and x.shape[3] == 3
         assert y.asscalar() == i
 
+
 def test_sampler():
     seq_sampler = gluon.data.SequentialSampler(10)
     assert list(seq_sampler) == list(range(10))
@@ -75,6 +78,8 @@ def test_sampler():
     rand_batch_keep = gluon.data.BatchSampler(rand_sampler, 3, 'keep')
     assert sorted(sum(list(rand_batch_keep), [])) == list(range(10))
 
+
+@attr('nightly')
 def test_datasets():
     assert len(gluon.data.vision.MNIST(root='data/mnist')) == 60000
     assert len(gluon.data.vision.MNIST(root='data/mnist', train=False)) == 10000
@@ -86,6 +91,7 @@ def test_datasets():
     assert len(gluon.data.vision.CIFAR100(root='data/cifar100', fine_label=True)) == 50000
     assert len(gluon.data.vision.CIFAR100(root='data/cifar100', train=False)) == 10000
 
+
 def test_image_folder_dataset():
     prepare_record()
     dataset = gluon.data.vision.ImageFolderDataset('data/test_images')
diff --git a/tests/python/unittest/test_gluon_model_zoo.py b/tests/python/unittest/test_gluon_model_zoo.py
index 39d3b19c36..0e98a7e457 100644
--- a/tests/python/unittest/test_gluon_model_zoo.py
+++ b/tests/python/unittest/test_gluon_model_zoo.py
@@ -20,6 +20,7 @@
 from mxnet.gluon import nn
 from mxnet.gluon.model_zoo.custom_layers import HybridConcurrent, Identity
 from mxnet.gluon.model_zoo.vision import get_model
+from nose.plugins.attrib import attr
 import sys
 
 def eprint(*args, **kwargs):
@@ -50,6 +51,7 @@ def test_identity():
                                       x.asnumpy())
 
 
+@attr('nightly')
 def test_models():
     all_models = ['resnet18_v1', 'resnet34_v1', 'resnet50_v1', 'resnet101_v1', 'resnet152_v1',
                   'resnet18_v2', 'resnet34_v2', 'resnet50_v2', 'resnet101_v2', 'resnet152_v2',
diff --git a/tests/python/unittest/test_io.py b/tests/python/unittest/test_io.py
index fa314e0f8b..fc7ecca8c5 100644
--- a/tests/python/unittest/test_io.py
+++ b/tests/python/unittest/test_io.py
@@ -29,8 +29,10 @@
 import sys
 from common import get_data, assertRaises
 import unittest
+from nose.plugins.attrib import attr
 
 
+@attr('nightly')
 def test_MNISTIter():
     # prepare data
     get_data.GetMNIST_ubyte()
@@ -60,6 +62,8 @@ def test_MNISTIter():
     label_1 = train_dataiter.getlabel().asnumpy().flatten()
     assert(sum(label_0 - label_1) == 0)
 
+
+@attr('nightly')
 def test_Cifar10Rec():
     get_data.GetCifar10()
     dataiter = mx.io.ImageRecordIter(
diff --git a/tests/python/unittest/test_module.py b/tests/python/unittest/test_module.py
index a8fb99dfc1..43d40d63a3 100644
--- a/tests/python/unittest/test_module.py
+++ b/tests/python/unittest/test_module.py
@@ -23,6 +23,7 @@
 from mxnet.module.executor_group import DataParallelExecutorGroup
 from common import assertRaises
 from collections import namedtuple
+from nose.plugins.attrib import attr
 
 import numpy.random as rnd
 
@@ -534,6 +535,7 @@ def check_shared_exec_group(sparse_embedding):
         check_shared_exec_group(opt)
 
 
+@attr('nightly')
 def test_factorization_machine_module(verbose=False):
     """ Test factorization machine model with sparse operators """
     def check_factorization_machine_module(optimizer=None, num_epochs=None):
diff --git a/tests/python/unittest/test_ndarray.py b/tests/python/unittest/test_ndarray.py
index 8e1f68fd62..28a568451f 100644
--- a/tests/python/unittest/test_ndarray.py
+++ b/tests/python/unittest/test_ndarray.py
@@ -23,9 +23,11 @@
 from nose.tools import raises
 from mxnet.test_utils import *
 from numpy.testing import assert_allclose
+from nose.plugins.attrib import attr
 import unittest
 import mxnet.autograd
 
+
 def check_with_uniform(uf, arg_shapes, dim=None, npuf=None, rmin=-10, type_list=[np.float32]):
     """check function consistency with uniform random numbers"""
     if isinstance(arg_shapes, int):
@@ -495,6 +497,8 @@ def test_arange():
                         dtype="int32").asnumpy()
     assert_almost_equal(pred, gt)
 
+
+@attr('nightly')
 def test_order(ctx=default_context()):
     def gt_topk(dat, axis, ret_typ, k, is_ascend):
         if ret_typ == "indices":
diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py
index 55a3a57218..b4309234a2 100644
--- a/tests/python/unittest/test_operator.py
+++ b/tests/python/unittest/test_operator.py
@@ -23,6 +23,7 @@
 import itertools
 from numpy.testing import assert_allclose, assert_array_equal
 from mxnet.test_utils import *
+from nose.plugins.attrib import attr
 import unittest
 
 
@@ -2225,6 +2226,7 @@ def check_l2_normalization(in_shape, mode, ctx=default_context(), norm_eps=1e-10
     check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=1e-3)
 
 
+@attr('nightly')
 def test_l2_normalization():
     for mode in ['channel', 'spatial', 'instance']:
         for nbatch in [1, 4]:
@@ -2586,6 +2588,7 @@ def test_arange():
     test_arange()
 
 
+@attr('nightly')
 def test_order():
     ctx = default_context()
 
@@ -3653,6 +3656,7 @@ def create_operator(self, ctx, shapes, dtypes):
     assert (y.stype == 'csr')
     assert (aux.stype == 'csr')
 
+@attr('nightly')
 def test_psroipooling():
     for num_rois in [1, 2]:
         for num_classes, num_group in itertools.product([2, 3], [2, 3]):
@@ -3678,6 +3682,7 @@ def test_psroipooling():
                                                grad_nodes=grad_nodes, ctx=mx.gpu(0))
 
 
+@attr('nightly')
 def test_deformable_convolution():
     for num_batch in [1, 2]:
         for num_channel_data, num_deformable_group in itertools.product([4, 8], [1, 2]):
@@ -3715,6 +3720,7 @@ def test_deformable_convolution():
                                                    grad_nodes=grad_nodes, ctx=mx.gpu(0))
 
 
+@attr('nightly')
 def test_deformable_psroipooling():
     for num_rois in [1, 2]:
         for num_classes, num_group in itertools.product([2, 3], [2, 3]):
diff --git a/tests/python/unittest/test_random.py b/tests/python/unittest/test_random.py
index a67e2d1113..6c85a16156 100644
--- a/tests/python/unittest/test_random.py
+++ b/tests/python/unittest/test_random.py
@@ -18,6 +18,7 @@
 import os
 import mxnet as mx
 import numpy as np
+from nose.plugins.attrib import attr
 
 def same(a, b):
     return np.sum(a != b) == 0
@@ -187,6 +188,8 @@ def check_with_device(device, dtype):
                 for check_name, check_func, tol in symbdic['checks']:
                     assert np.abs(check_func(samples, params)) < tol, "symbolic test: %s check for `%s` did not pass" % (check_name, name)
 
+
+@attr('nightly')
 def test_random():
     check_with_device(mx.context.current_context(), 'float16')
     check_with_device(mx.context.current_context(), 'float32')
diff --git a/tests/python/unittest/test_sparse_ndarray.py b/tests/python/unittest/test_sparse_ndarray.py
index e59e476601..1b342e66de 100644
--- a/tests/python/unittest/test_sparse_ndarray.py
+++ b/tests/python/unittest/test_sparse_ndarray.py
@@ -286,6 +286,7 @@ def check(fn, stype):
         check(lambda x: 0.5 == x, stype)
         check(lambda x: x / 2, stype)
 
+
 def test_sparse_nd_binary_iop():
     N = 3
     def check_binary(fn, stype):
@@ -314,6 +315,7 @@ def inplace_mul(x, y):
         for fn in fns:
             check_binary(fn, stype)
 
+
 def test_sparse_nd_negate():
     def check_sparse_nd_negate(shape, stype):
         npy = np.random.uniform(-10, 10, rand_shape_2d())
@@ -331,6 +333,7 @@ def check_sparse_nd_negate(shape, stype):
     for stype in stypes:
         check_sparse_nd_negate(shape, stype)
 
+
 def test_sparse_nd_broadcast():
     sample_num = 1000
     # TODO(haibin) test with more than 2 dimensions
@@ -365,6 +368,7 @@ def test_sparse_nd_transpose():
         nd = mx.nd.array(npy).tostype(stype)
         assert_almost_equal(npy.T, (nd.T).asnumpy())
 
+
 def test_sparse_nd_storage_fallback():
     def check_output_fallback(shape):
         ones = mx.nd.ones(shape)
@@ -387,6 +391,7 @@ def check_fallback_with_temp_resource(shape):
     check_input_fallback(shape)
     check_fallback_with_temp_resource(shape)
 
+
 def test_sparse_nd_random():
     """ test sparse random operator on cpu """
     # gpu random operator doesn't use fixed seed
@@ -466,6 +471,7 @@ def test_sparse_nd_save_load():
             assert same(x.asnumpy(), y.asnumpy())
     os.remove(fname)
 
+
 def test_sparse_nd_unsupported():
     nd = mx.nd.zeros((2,2), stype='row_sparse')
     fn_slice = lambda x: x._slice(None, None)
@@ -479,6 +485,7 @@ def test_sparse_nd_unsupported():
         except:
             pass
 
+
 def test_create_csr():
     def check_create_csr_from_nd(shape, density, dtype):
         matrix = rand_ndarray(shape, 'csr', density)
@@ -498,6 +505,7 @@ def check_create_csr_from_nd(shape, density, dtype):
         csr_copy = mx.nd.array(csr_created)
         assert(same(csr_copy.asnumpy(), csr_created.asnumpy()))
 
+
     def check_create_csr_from_coo(shape, density, dtype):
         matrix = rand_ndarray(shape, 'csr', density)
         sp_csr = matrix.asscipy()
@@ -514,6 +522,7 @@ def check_create_csr_from_coo(shape, density, dtype):
         assert csr_created.data.dtype == dtype, (csr_created.data.dtype, dtype)
         assert csr_created.context == Context.default_ctx, (csr_created.context, Context.default_ctx)
 
+
     def check_create_csr_from_scipy(shape, density, f):
         def assert_csr_almost_equal(nd, sp):
             assert_almost_equal(nd.data.asnumpy(), sp.data)
@@ -555,6 +564,7 @@ def assert_csr_almost_equal(nd, sp):
         check_create_csr_from_scipy(shape, density, mx.nd.sparse.array)
         check_create_csr_from_scipy(shape, density, mx.nd.array)
 
+
 def test_create_row_sparse():
     dim0 = 50
     dim1 = 50
@@ -571,6 +581,7 @@ def test_create_row_sparse():
         rsp_copy = mx.nd.array(rsp_created)
         assert(same(rsp_copy.asnumpy(), rsp_created.asnumpy()))
 
+
 def test_create_sparse_nd_infer_shape():
     def check_create_csr_infer_shape(shape, density, dtype):
         try:
@@ -613,6 +624,7 @@ def check_create_rsp_infer_shape(shape, density, dtype):
         check_create_rsp_infer_shape(shape, density, dtype)
         check_create_rsp_infer_shape(shape_3d, density, dtype)
 
+
 def test_create_sparse_nd_from_dense():
     def check_create_from_dns(shape, f, dense_arr, dtype, default_dtype, ctx):
         arr = f(dense_arr, dtype=dtype, ctx=ctx)
@@ -635,6 +647,7 @@ def check_create_from_dns(shape, f, dense_arr, dtype, default_dtype, ctx):
                             else np.float32
             check_create_from_dns(shape, f, dense_arr, dtype, default_dtype, ctx)
 
+
 def test_create_sparse_nd_from_sparse():
     def check_create_from_sp(shape, f, sp_arr, dtype, src_dtype, ctx):
         arr = f(sp_arr, dtype=dtype, ctx=ctx)
@@ -666,6 +679,7 @@ def check_create_from_sp(shape, f, sp_arr, dtype, src_dtype, ctx):
     for sp_arr in rsp_arrs:
         check_create_from_sp(shape, f_rsp, sp_arr, dtype, src_dtype, ctx)
 
+
 def test_create_sparse_nd_empty():
     def check_empty(shape, stype):
         arr = mx.nd.empty(shape, stype=stype)
@@ -705,6 +719,7 @@ def check_rsp_empty(shape, dtype, ctx):
     check_rsp_empty(shape, dtype, ctx)
     check_rsp_empty(shape_3d, dtype, ctx)
 
+
 def test_synthetic_dataset_generator():
     def test_powerlaw_generator(csr_arr, final_row=1):
         """Test power law distribution
diff --git a/tests/python/unittest/test_sparse_operator.py b/tests/python/unittest/test_sparse_operator.py
index a08b6187bc..c9c2f79e69 100644
--- a/tests/python/unittest/test_sparse_operator.py
+++ b/tests/python/unittest/test_sparse_operator.py
@@ -19,9 +19,11 @@
 import random
 import warnings
 
+
 def is_scalar(var):
     return False if hasattr(var, "__len__") else True
 
+
 def get_result_type(call, dflt_stype):
     """Try to infer result storage type for a sparse matrix and a given unary operation"""
     if call is not None and dflt_stype != 'default':
@@ -127,11 +129,13 @@ def get_fw_bw_result_types_2(forward_numpy_call,  fwd_res_dflt,
     return (get_result_type(forward_numpy_call,  fwd_res_dflt),
             get_result_type_2(backward_numpy_call, bwd_res_dflt))
 
+
 def get_fw_bw_result_types_with_scalar(forward_numpy_call,  fwd_res_dflt,
                                        backward_numpy_call, bwd_res_dflt):
     return (get_result_type_with_scalar(forward_numpy_call,  fwd_res_dflt),
             get_result_type_with_scalar(backward_numpy_call, bwd_res_dflt))
 
+
 def gen_rsp_random_indices(shape, density=.5, force_indices=None):
     assert density >= 0 and density <= 1
     indices = set()
@@ -151,6 +155,7 @@ def gen_rsp_random_indices(shape, density=.5, force_indices=None):
 def all_zero(var):
     return 0
 
+
 def test_elemwise_binary_ops():
     def test_elemwise_binary_op(name, lhs_stype, rhs_stype, shape,
                                 forward_mxnet_call, forward_numpy_call, backward_numpy_call,


 
