Posted to commits@mxnet.apache.org by in...@apache.org on 2018/11/02 23:04:18 UTC

[incubator-mxnet] branch master updated: Updated / Deleted some examples (#12968)

This is an automated email from the ASF dual-hosted git repository.

indhub pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new 3e8a976  Updated / Deleted some examples (#12968)
3e8a976 is described below

commit 3e8a976d805dee130831d4f54b7a5dd9f1a7c7bd
Author: Thomas Delteil <th...@gmail.com>
AuthorDate: Fri Nov 2 16:03:51 2018 -0700

    Updated / Deleted some examples (#12968)
    
    * Updated / Deleted some examples
    
    * remove onnx test
    
    * remove onnx test
---
 ci/docker/runtime_functions.sh                     |  1 -
 example/multivariate_time_series/README.md         |  4 +-
 example/named_entity_recognition/README.md         |  1 -
 example/named_entity_recognition/src/metrics.py    |  2 +-
 example/named_entity_recognition/src/ner.py        |  2 +-
 example/nce-loss/README.md                         |  2 +-
 example/numpy-ops/numpy_softmax.py                 | 84 ---------------------
 example/onnx/super_resolution.py                   | 86 ----------------------
 example/python-howto/README.md                     | 37 ----------
 example/python-howto/data_iter.py                  | 76 -------------------
 example/python-howto/debug_conv.py                 | 39 ----------
 example/python-howto/monitor_weights.py            | 46 ------------
 example/python-howto/multiple_outputs.py           | 38 ----------
 .../{mxnet_adversarial_vae => vae-gan}/README.md   |  0
 .../convert_data.py                                |  0
 .../vaegan_mxnet.py                                |  0
 .../python-pytest/onnx/import/onnx_import_test.py  | 15 ----
 17 files changed, 6 insertions(+), 427 deletions(-)

diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh
index 0adec07..095eb57 100755
--- a/ci/docker/runtime_functions.sh
+++ b/ci/docker/runtime_functions.sh
@@ -877,7 +877,6 @@ unittest_centos7_gpu() {
 integrationtest_ubuntu_cpu_onnx() {
 	set -ex
 	export PYTHONPATH=./python/
-	python example/onnx/super_resolution.py
 	pytest tests/python-pytest/onnx/import/mxnet_backend_test.py
 	pytest tests/python-pytest/onnx/import/onnx_import_test.py
 	pytest tests/python-pytest/onnx/import/gluon_backend_test.py
diff --git a/example/multivariate_time_series/README.md b/example/multivariate_time_series/README.md
index 704c86a..87baca3 100644
--- a/example/multivariate_time_series/README.md
+++ b/example/multivariate_time_series/README.md
@@ -3,6 +3,8 @@
 - This repo contains an MXNet implementation of [this](https://arxiv.org/pdf/1703.07015.pdf) state of the art time series forecasting model.
 - You can find my blog post on the model [here](https://opringle.github.io/2018/01/05/deep_learning_multivariate_ts.html)
 
+- A Gluon implementation is available [here](https://github.com/safrooze/LSTNet-Gluon)
+
 ![](./docs/model_architecture.png)
 
 ## Running the code
@@ -22,7 +24,7 @@
 
 ## Hyperparameters
 
-The default arguements in `lstnet.py` achieve equivolent performance to the published results. For other datasets, the following hyperparameters provide a good starting point:
+The default arguments in `lstnet.py` achieve equivalent performance to the published results. For other datasets, the following hyperparameters provide a good starting point:
 
 - q = {2^0, 2^1, ... , 2^9} (1 week is typical value)
 - Convolutional num filters  = {50, 100, 200}
diff --git a/example/named_entity_recognition/README.md b/example/named_entity_recognition/README.md
index 260c19d..2b28b3b 100644
--- a/example/named_entity_recognition/README.md
+++ b/example/named_entity_recognition/README.md
@@ -11,7 +11,6 @@ To reproduce the preprocessed training data:
 
 1. Download and unzip the data: https://www.kaggle.com/abhinavwalia95/entity-annotated-corpus/downloads/ner_dataset.csv
 2. Move ner_dataset.csv into `./data`
-3. create `./preprocessed_data` directory
 3. `$ cd src && python preprocess.py`
 
 To train the model:
diff --git a/example/named_entity_recognition/src/metrics.py b/example/named_entity_recognition/src/metrics.py
index 40c5015..d3d7378 100644
--- a/example/named_entity_recognition/src/metrics.py
+++ b/example/named_entity_recognition/src/metrics.py
@@ -27,7 +27,7 @@ def load_obj(name):
     with open(name + '.pkl', 'rb') as f:
         return pickle.load(f)
 
-tag_dict = load_obj("../preprocessed_data/tag_to_index")
+tag_dict = load_obj("../data/tag_to_index")
 not_entity_index = tag_dict["O"]
 
 def classifer_metrics(label, pred):
diff --git a/example/named_entity_recognition/src/ner.py b/example/named_entity_recognition/src/ner.py
index 561db4c..7f5dd84 100644
--- a/example/named_entity_recognition/src/ner.py
+++ b/example/named_entity_recognition/src/ner.py
@@ -34,7 +34,7 @@ logging.basicConfig(level=logging.DEBUG)
 
 parser = argparse.ArgumentParser(description="Deep neural network for multivariate time series forecasting",
                                  formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-parser.add_argument('--data-dir', type=str, default='../preprocessed_data',
+parser.add_argument('--data-dir', type=str, default='../data',
                     help='relative path to input data')
 parser.add_argument('--output-dir', type=str, default='../results',
                     help='directory to save model files to')
diff --git a/example/nce-loss/README.md b/example/nce-loss/README.md
index 70730b4..56e4352 100644
--- a/example/nce-loss/README.md
+++ b/example/nce-loss/README.md
@@ -29,7 +29,7 @@ The dataset used in the following examples is [text8](http://mattmahoney.net/dc/
 * word2vec.py: a CBOW word2vec example using nce loss. You need to [download the text8 dataset](#dataset-download) before running this script. Command to start training on CPU (pass -g for training on GPU):
 
 ```
-python wordvec.py
+python word2vec.py
 
 ```
 
diff --git a/example/numpy-ops/numpy_softmax.py b/example/numpy-ops/numpy_softmax.py
deleted file mode 100644
index 88d2473..0000000
--- a/example/numpy-ops/numpy_softmax.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-# pylint: skip-file
-import mxnet as mx
-from mxnet.test_utils import get_mnist_iterator
-import numpy as np
-import logging
-
-
-class NumpySoftmax(mx.operator.NumpyOp):
-    def __init__(self):
-        super(NumpySoftmax, self).__init__(False)
-
-    def list_arguments(self):
-        return ['data', 'label']
-
-    def list_outputs(self):
-        return ['output']
-
-    def infer_shape(self, in_shape):
-        data_shape = in_shape[0]
-        label_shape = (in_shape[0][0],)
-        output_shape = in_shape[0]
-        return [data_shape, label_shape], [output_shape]
-
-    def forward(self, in_data, out_data):
-        x = in_data[0]
-        y = out_data[0]
-        y[:] = np.exp(x - x.max(axis=1).reshape((x.shape[0], 1)))
-        y /= y.sum(axis=1).reshape((x.shape[0], 1))
-
-    def backward(self, out_grad, in_data, out_data, in_grad):
-        l = in_data[1]
-        l = l.reshape((l.size,)).astype(np.int)
-        y = out_data[0]
-        dx = in_grad[0]
-        dx[:] = y
-        dx[np.arange(l.shape[0]), l] -= 1.0
-
-# define mlp
-
-data = mx.symbol.Variable('data')
-fc1 = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=128)
-act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu")
-fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64)
-act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu")
-fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=10)
-#mlp = mx.symbol.Softmax(data = fc3, name = 'mlp')
-mysoftmax = NumpySoftmax()
-mlp = mysoftmax(data=fc3, name = 'softmax')
-
-# data
-
-train, val = get_mnist_iterator(batch_size=100, input_shape = (784,))
-
-# train
-
-logging.basicConfig(level=logging.DEBUG)
-
-# MXNET_CPU_WORKER_NTHREADS must be greater than 1 for custom op to work on CPU
-context=mx.cpu()
-# Uncomment this line to train on GPU instead of CPU
-# context=mx.gpu(0)
-
-mod = mx.mod.Module(mlp, context=context)
-
-mod.fit(train_data=train, eval_data=val, optimizer='sgd',
-    optimizer_params={'learning_rate':0.1, 'momentum': 0.9, 'wd': 0.00001},
-    num_epoch=10, batch_end_callback=mx.callback.Speedometer(100, 100))
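
The file removed above was written against the legacy mx.operator.NumpyOp interface. For readers who relied on it, here is a minimal sketch of the same softmax expressed with the current mx.operator.CustomOp / CustomOpProp API; the operator name "numpy_softmax" and the fc3 wiring are illustrative, not part of this commit.

```
import mxnet as mx
import numpy as np


class NumpySoftmax(mx.operator.CustomOp):
    def forward(self, is_train, req, in_data, out_data, aux):
        # Numerically stable softmax computed in numpy, copied back to the output.
        x = in_data[0].asnumpy()
        y = np.exp(x - x.max(axis=1, keepdims=True))
        y /= y.sum(axis=1, keepdims=True)
        self.assign(out_data[0], req[0], mx.nd.array(y))

    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        # Gradient of softmax + cross-entropy: y - one_hot(label).
        l = in_data[1].asnumpy().ravel().astype(np.int64)
        y = out_data[0].asnumpy()
        dx = y.copy()
        dx[np.arange(l.shape[0]), l] -= 1.0
        self.assign(in_grad[0], req[0], mx.nd.array(dx))


@mx.operator.register("numpy_softmax")
class NumpySoftmaxProp(mx.operator.CustomOpProp):
    def __init__(self):
        super(NumpySoftmaxProp, self).__init__(need_top_grad=False)

    def list_arguments(self):
        return ['data', 'label']

    def list_outputs(self):
        return ['output']

    def infer_shape(self, in_shape):
        data_shape = in_shape[0]
        label_shape = (in_shape[0][0],)
        return [data_shape, label_shape], [data_shape], []

    def create_operator(self, ctx, shapes, dtypes):
        return NumpySoftmax()


# Hooked into a network the same way the deleted example did:
fc3 = mx.symbol.FullyConnected(data=mx.symbol.Variable('data'), name='fc3', num_hidden=10)
mlp = mx.symbol.Custom(data=fc3, name='softmax', op_type='numpy_softmax')
```

Registering the prop class with @mx.operator.register exposes the op through mx.symbol.Custom, which takes the place of the deleted example's direct NumpySoftmax() call.
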
diff --git a/example/onnx/super_resolution.py b/example/onnx/super_resolution.py
deleted file mode 100644
index fcb8ccc..0000000
--- a/example/onnx/super_resolution.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-"""Testing super_resolution model conversion"""
-from __future__ import absolute_import as _abs
-from __future__ import print_function
-from collections import namedtuple
-import logging
-import numpy as np
-from PIL import Image
-import mxnet as mx
-from mxnet.test_utils import download
-import mxnet.contrib.onnx as onnx_mxnet
-
-# set up logger
-logging.basicConfig()
-LOGGER = logging.getLogger()
-LOGGER.setLevel(logging.INFO)
-
-def import_onnx():
-    """Import the onnx model into mxnet"""
-    model_url = 'https://s3.amazonaws.com/onnx-mxnet/examples/super_resolution.onnx'
-    download(model_url, 'super_resolution.onnx')
-
-    LOGGER.info("Converting onnx format to mxnet's symbol and params...")
-    sym, arg_params, aux_params = onnx_mxnet.import_model('super_resolution.onnx')
-    LOGGER.info("Successfully Converted onnx format to mxnet's symbol and params...")
-    return sym, arg_params, aux_params
-
-def get_test_image():
-    """Download and process the test image"""
-    # Load test image
-    input_image_dim = 224
-    img_url = 'https://s3.amazonaws.com/onnx-mxnet/examples/super_res_input.jpg'
-    download(img_url, 'super_res_input.jpg')
-    img = Image.open('super_res_input.jpg').resize((input_image_dim, input_image_dim))
-    img_ycbcr = img.convert("YCbCr")
-    img_y, img_cb, img_cr = img_ycbcr.split()
-    input_image = np.array(img_y)[np.newaxis, np.newaxis, :, :]
-    return input_image, img_cb, img_cr
-
-def perform_inference(sym, arg_params, aux_params, input_img, img_cb, img_cr):
-    """Perform inference on image using mxnet"""
-    metadata = onnx_mxnet.get_model_metadata('super_resolution.onnx')
-    data_names = [input_name[0] for input_name in metadata.get('input_tensor_data')]
-    # create module
-    mod = mx.mod.Module(symbol=sym, data_names=data_names, label_names=None)
-    mod.bind(for_training=False, data_shapes=[(data_names[0], input_img.shape)])
-    mod.set_params(arg_params=arg_params, aux_params=aux_params)
-
-    # run inference
-    batch = namedtuple('Batch', ['data'])
-    mod.forward(batch([mx.nd.array(input_img)]))
-
-    # Save the result
-    img_out_y = Image.fromarray(np.uint8(mod.get_outputs()[0][0][0].
-                                         asnumpy().clip(0, 255)), mode='L')
-
-    result_img = Image.merge(
-        "YCbCr", [img_out_y,
-                  img_cb.resize(img_out_y.size, Image.BICUBIC),
-                  img_cr.resize(img_out_y.size, Image.BICUBIC)]).convert("RGB")
-    output_img_dim = 672
-    assert result_img.size == (output_img_dim, output_img_dim)
-    LOGGER.info("Super Resolution example success.")
-    result_img.save("super_res_output.jpg")
-    return result_img
-
-if __name__ == '__main__':
-    MX_SYM, MX_ARG_PARAM, MX_AUX_PARAM = import_onnx()
-    INPUT_IMG, IMG_CB, IMG_CR = get_test_image()
-    perform_inference(MX_SYM, MX_ARG_PARAM, MX_AUX_PARAM, INPUT_IMG, IMG_CB, IMG_CR)
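
The removed script above is self-contained, but the ONNX-to-MXNet conversion it exercised boils down to a few calls in mxnet.contrib.onnx. A condensed sketch, using the same model URL the script downloaded and the 1x1x224x224 input shape it constructed:

```
import mxnet as mx
import mxnet.contrib.onnx as onnx_mxnet
from mxnet.test_utils import download

# Fetch the same ONNX model the deleted example used and convert it to
# an MXNet symbol plus parameter dicts.
download('https://s3.amazonaws.com/onnx-mxnet/examples/super_resolution.onnx',
         'super_resolution.onnx')
sym, arg_params, aux_params = onnx_mxnet.import_model('super_resolution.onnx')

# Bind a module for inference on a single 1x1x224x224 luminance image.
metadata = onnx_mxnet.get_model_metadata('super_resolution.onnx')
data_names = [t[0] for t in metadata.get('input_tensor_data')]
mod = mx.mod.Module(symbol=sym, data_names=data_names, label_names=None)
mod.bind(for_training=False, data_shapes=[(data_names[0], (1, 1, 224, 224))])
mod.set_params(arg_params=arg_params, aux_params=aux_params)
```
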
diff --git a/example/python-howto/README.md b/example/python-howto/README.md
deleted file mode 100644
index 2965240..0000000
--- a/example/python-howto/README.md
+++ /dev/null
@@ -1,37 +0,0 @@
-Python Howto Examples
-=====================
-
-* [Configuring Net to Get Multiple Ouputs](multiple_outputs.py)
-* [Configuring Image Record Iterator](data_iter.py)
-* [Monitor Intermediate Outputs in the Network](monitor_weights.py)
-* Set break point in C++ code of the symbol using gdb under Linux:
-
-	* 	Build mxnet with following values:
-
-		 ```
-		 	DEBUG=1 
-		 	USE_CUDA=0 # to make sure convolution-inl.h will be used
-		 	USE_CUDNN=0 # to make sure convolution-inl.h will be used
-		 ```
-		 
-	*  run python under gdb:  ```gdb --args python debug_conv.py```
-	*  in gdb set break point on particular line of the code and run execution: 
-
-```
-(gdb) break src/operator/convolution-inl.h:120
-(gdb) run
-Breakpoint 1, mxnet::op::ConvolutionOp<mshadow::cpu, float>::Forward (this=0x12219d0, ctx=..., in_data=std::vector of length 3, capacity 4 = {...}, req=std::vector of length 1, capacity 1 = {...}, out_data=std::vector of length 1, capacity 1 = {...},
-    aux_args=std::vector of length 0, capacity 0) at src/operator/./convolution-inl.h:121
-121	               data.shape_[1] / param_.num_group * param_.kernel[0] * param_.kernel[1]);
-(gdb) list
-116	    }
-117	    Tensor<xpu, 4, DType> data = in_data[conv::kData].get<xpu, 4, DType>(s);
-118	    Shape<3> wmat_shape =
-119	        Shape3(param_.num_group,
-120	               param_.num_filter / param_.num_group,
-121	               data.shape_[1] / param_.num_group * param_.kernel[0] * param_.kernel[1]);
-122	    Tensor<xpu, 3, DType> wmat =
-123	        in_data[conv::kWeight].get_with_shape<xpu, 3, DType>(wmat_shape, s);
-124	    Tensor<xpu, 4, DType> out = out_data[conv::kOut].get<xpu, 4, DType>(s);
-125	#if defined(__CUDACC__)
-```
diff --git a/example/python-howto/data_iter.py b/example/python-howto/data_iter.py
deleted file mode 100644
index 81c8988..0000000
--- a/example/python-howto/data_iter.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-"""Create a Cifar data iterator.
-
-This example shows how to create a iterator reading from recordio,
-introducing image augmentations and using a backend thread to hide IO cost.
-All you need to do is to set some parameters.
-"""
-import mxnet as mx
-
-dataiter = mx.io.ImageRecordIter(
-        # Dataset Parameter
-        # Impulsary
-        # indicating the data file, please check the data is already there
-        path_imgrec="data/cifar/train.rec",
-        # Dataset/Augment Parameter
-        # Impulsary
-        # indicating the image size after preprocessing
-        data_shape=(3,28,28),
-        # Batch Parameter
-        # Impulsary
-        # tells how many images in a batch
-        batch_size=100,
-        # Augmentation Parameter
-        # Optional
-        # when offers mean_img, each image will subtract the mean value at each pixel
-        mean_img="data/cifar/cifar10_mean.bin",
-        # Augmentation Parameter
-        # Optional
-        # randomly crop a patch of the data_shape from the original image
-        rand_crop=True,
-        # Augmentation Parameter
-        # Optional
-        # randomly mirror the image horizontally
-        rand_mirror=True,
-        # Augmentation Parameter
-        # Optional
-        # randomly shuffle the data
-        shuffle=False,
-        # Backend Parameter
-        # Optional
-        # Preprocessing thread number
-        preprocess_threads=4,
-        # Backend Parameter
-        # Optional
-        # Prefetch buffer size
-        prefetch_buffer=4,
-        # Backend Parameter,
-        # Optional
-        # Whether round batch,
-        round_batch=True)
-
-batchidx = 0
-for dbatch in dataiter:
-    data = dbatch.data[0]
-    label = dbatch.label[0]
-    pad = dbatch.pad
-    index = dbatch.index
-    print("Batch", batchidx)
-    print(label.asnumpy().flatten())
-    batchidx += 1
diff --git a/example/python-howto/debug_conv.py b/example/python-howto/debug_conv.py
deleted file mode 100644
index 9de421d..0000000
--- a/example/python-howto/debug_conv.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-import mxnet as mx
-
-data_shape = (1,3,5,5)
-class SimpleData(object):
-
-    def __init__(self, data):
-        self.data = data
-
-data = mx.sym.Variable('data')
-conv = mx.sym.Convolution(data=data, kernel=(3,3), pad=(1,1), stride=(1,1), num_filter=1)
-mon = mx.mon.Monitor(1)
-
-
-mod = mx.mod.Module(conv)
-mod.bind(data_shapes=[('data', data_shape)])
-mod._exec_group.install_monitor(mon)
-mod.init_params()
-
-input_data = mx.nd.ones(data_shape)
-mod.forward(data_batch=SimpleData([input_data]))
-res = mod.get_outputs()[0].asnumpy()
-print(res)
diff --git a/example/python-howto/monitor_weights.py b/example/python-howto/monitor_weights.py
deleted file mode 100644
index 929b0e7..0000000
--- a/example/python-howto/monitor_weights.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-# pylint: skip-file
-import mxnet as mx
-from mxnet.test_utils import get_mnist_iterator
-import numpy as np
-import logging
-
-# network
-data = mx.symbol.Variable('data')
-fc1 = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=128)
-act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu")
-fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64)
-act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu")
-fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=10)
-mlp = mx.symbol.SoftmaxOutput(data = fc3, name = 'softmax')
-
-# data
-train, val = get_mnist_iterator(batch_size=100, input_shape = (784,))
-
-# monitor
-def norm_stat(d):
-    return mx.nd.norm(d)/np.sqrt(d.size)
-mon = mx.mon.Monitor(100, norm_stat)
-
-# train with monitor
-logging.basicConfig(level=logging.DEBUG)
-module = mx.module.Module(context=mx.cpu(), symbol=mlp)
-module.fit(train_data=train, eval_data=val, monitor=mon, num_epoch=2,
-           batch_end_callback = mx.callback.Speedometer(100, 100),
-           optimizer_params=(('learning_rate', 0.1), ('momentum', 0.9), ('wd', 0.00001)))
diff --git a/example/python-howto/multiple_outputs.py b/example/python-howto/multiple_outputs.py
deleted file mode 100644
index 7c1ddd2..0000000
--- a/example/python-howto/multiple_outputs.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-"""Create a Multiple output configuration.
-
-This example shows how to create a multiple output configuration.
-"""
-from __future__ import print_function
-import mxnet as mx
-
-net = mx.symbol.Variable('data')
-fc1 = mx.symbol.FullyConnected(data=net, name='fc1', num_hidden=128)
-net = mx.symbol.Activation(data=fc1, name='relu1', act_type="relu")
-net = mx.symbol.FullyConnected(data=net, name='fc2', num_hidden=64)
-out = mx.symbol.SoftmaxOutput(data=net, name='softmax')
-# group fc1 and out together
-group = mx.symbol.Group([fc1, out])
-print(group.list_outputs())
-
-# You can go ahead and bind on the group
-# executor = group.simple_bind(data=data_shape)
-# executor.forward()
-# executor.output[0] will be value of fc1
-# executor.output[1] will be value of softmax
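
The removed file above left its bind step commented out (and referred to a non-existent executor.output attribute; the correct attribute is executor.outputs). A minimal sketch of binding on the grouped symbol and reading both outputs; the batch size and CPU context are assumptions:

```
import mxnet as mx

# Rebuild the grouped symbol from the removed example.
net = mx.symbol.Variable('data')
fc1 = mx.symbol.FullyConnected(data=net, name='fc1', num_hidden=128)
net = mx.symbol.Activation(data=fc1, name='relu1', act_type="relu")
net = mx.symbol.FullyConnected(data=net, name='fc2', num_hidden=64)
out = mx.symbol.SoftmaxOutput(data=net, name='softmax')
group = mx.symbol.Group([fc1, out])

# Bind on the group and run a forward pass with dummy data.
data_shape = (10, 128)
executor = group.simple_bind(ctx=mx.cpu(), data=data_shape)
executor.forward(is_train=False, data=mx.nd.ones(data_shape))
fc1_value = executor.outputs[0]      # value of fc1
softmax_value = executor.outputs[1]  # value of softmax
```
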
diff --git a/example/mxnet_adversarial_vae/README.md b/example/vae-gan/README.md
similarity index 100%
rename from example/mxnet_adversarial_vae/README.md
rename to example/vae-gan/README.md
diff --git a/example/mxnet_adversarial_vae/convert_data.py b/example/vae-gan/convert_data.py
similarity index 100%
rename from example/mxnet_adversarial_vae/convert_data.py
rename to example/vae-gan/convert_data.py
diff --git a/example/mxnet_adversarial_vae/vaegan_mxnet.py b/example/vae-gan/vaegan_mxnet.py
similarity index 100%
rename from example/mxnet_adversarial_vae/vaegan_mxnet.py
rename to example/vae-gan/vaegan_mxnet.py
diff --git a/tests/python-pytest/onnx/import/onnx_import_test.py b/tests/python-pytest/onnx/import/onnx_import_test.py
index 573dd74..c2d1e9c 100644
--- a/tests/python-pytest/onnx/import/onnx_import_test.py
+++ b/tests/python-pytest/onnx/import/onnx_import_test.py
@@ -149,21 +149,6 @@ def test_equal():
     output = bkd_rep.run([input1, input2])
     npt.assert_almost_equal(output[0], numpy_op)
 
-def test_super_resolution_example():
-    """Test the super resolution example in the example/onnx folder"""
-    sys.path.insert(0, os.path.join(CURR_PATH, '../../../../example/onnx/'))
-    import super_resolution
-
-    sym, arg_params, aux_params = super_resolution.import_onnx()
-
-    logging.info("Asserted the result of the onnx model conversion")
-    output_img_dim = 672
-    input_image, img_cb, img_cr = super_resolution.get_test_image()
-    result_img = super_resolution.perform_inference(sym, arg_params, aux_params,
-                                                    input_image, img_cb, img_cr)
-
-    assert hashlib.md5(result_img.tobytes()).hexdigest() == '0d98393a49b1d9942106a2ed89d1e854'
-    assert result_img.size == (output_img_dim, output_img_dim)
 
 def get_test_files(name):
     """Extract tar file and returns model path and input, output data"""