Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2018/08/06 22:13:30 UTC

[GitHub] sandeep-krishnamurthy closed pull request #11902: Remove stale unused Keras1-MXNet tests from MXNet repo

sandeep-krishnamurthy closed pull request #11902: Remove stale unused Keras1-MXNet tests from MXNet repo
URL: https://github.com/apache/incubator-mxnet/pull/11902
 
 
   

This is a PR merged from a forked repository.
As GitHub hides the original diff of a foreign pull request once it is merged, the diff is supplied
below for the sake of provenance:

diff --git a/docs/faq/index.md b/docs/faq/index.md
index c351bc90dba..07dd9b9d7ca 100644
--- a/docs/faq/index.md
+++ b/docs/faq/index.md
@@ -45,7 +45,7 @@ and full working examples, visit the [tutorials section](../tutorials/index.md).
 
 * [How do I run MXNet on a Raspberry Pi for computer vision?](http://mxnet.io/tutorials/embedded/wine_detector.html)
 
-* [How do I run Keras 1.2.2 with mxnet backend?](https://github.com/dmlc/keras/wiki/Installation)
+* [How do I run Keras 2 with MXNet backend?](https://github.com/awslabs/keras-apache-mxnet/blob/master/docs/mxnet_backend/installation.md)
 
 * [How to convert MXNet models to Apple CoreML format?](https://github.com/apache/incubator-mxnet/tree/master/tools/coreml)
 
diff --git a/tests/nightly/mxnet_keras_integration_tests/assertion_util.py b/tests/nightly/mxnet_keras_integration_tests/assertion_util.py
deleted file mode 100644
index eb3d3bd85fd..00000000000
--- a/tests/nightly/mxnet_keras_integration_tests/assertion_util.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-
-from nose.tools import assert_true
-
-def assert_results(MACHINE_TYPE, IS_GPU, GPU_NUM, profile_output, CPU_BENCHMARK_RESULTS, GPU_1_BENCHMARK_RESULTS, GPU_2_BENCHMARK_RESULTS, GPU_4_BENCHMARK_RESULTS, GPU_8_BENCHMARK_RESULTS):
-    """
-        Helps in asserting benchmarking results.
-        Compares actual output result in profile_output with expected result in
-        CPU_BENCHMARK_RESULTS if IS_GPU is True.
-        Else, compares with GPU_1_BENCHMARK_RESULTS, GPU_2_BENCHMARK_RESULTS
-        GPU_4_BENCHMARK_RESULTS and GPU_8_BENCHMARK_RESULTS.
-
-        Uses keys - MODEL, TRAINING_TIME, MEM_CONSUMPTION, TRAIN_ACCURACY and TEST_ACCURACY
-        to fetch data from provided actual and expected results input map stated above.
-    """
-    # Model type
-    model = profile_output['MODEL']
-
-    # Actual values.
-    actual_training_time = profile_output['TRAINING_TIME']
-    actual_memory_consumption = profile_output['MEM_CONSUMPTION']
-    actual_train_accuracy = profile_output['TRAIN_ACCURACY']
-    actual_test_accuracy = profile_output['TEST_ACCURACY']
-
-    # Expected values
-    expected_training_time = 0.0
-    expected_memory_consumption = 0.0
-    expected_train_accuracy = 1.0
-    expected_test_accuracy = 1.0
-
-    # Set right set of expected values based on current run type
-    if(IS_GPU):
-        if GPU_NUM == 1:
-            expected_training_time = GPU_1_BENCHMARK_RESULTS['TRAINING_TIME']
-            expected_memory_consumption = GPU_1_BENCHMARK_RESULTS['MEM_CONSUMPTION']
-            expected_train_accuracy = GPU_1_BENCHMARK_RESULTS['TRAIN_ACCURACY']
-            expected_test_accuracy = GPU_1_BENCHMARK_RESULTS['TEST_ACCURACY']
-        elif GPU_NUM == 2:
-            expected_training_time = GPU_2_BENCHMARK_RESULTS['TRAINING_TIME']
-            expected_memory_consumption = GPU_2_BENCHMARK_RESULTS['MEM_CONSUMPTION']
-            expected_train_accuracy = GPU_2_BENCHMARK_RESULTS['TRAIN_ACCURACY']
-            expected_test_accuracy = GPU_2_BENCHMARK_RESULTS['TEST_ACCURACY']
-        elif GPU_NUM == 4:
-            expected_training_time = GPU_4_BENCHMARK_RESULTS['TRAINING_TIME']
-            expected_memory_consumption = GPU_4_BENCHMARK_RESULTS['MEM_CONSUMPTION']
-            expected_train_accuracy = GPU_4_BENCHMARK_RESULTS['TRAIN_ACCURACY']
-            expected_test_accuracy = GPU_4_BENCHMARK_RESULTS['TEST_ACCURACY']
-        elif GPU_NUM == 8:
-            expected_training_time = GPU_8_BENCHMARK_RESULTS['TRAINING_TIME']
-            expected_memory_consumption = GPU_8_BENCHMARK_RESULTS['MEM_CONSUMPTION']
-            expected_train_accuracy = GPU_8_BENCHMARK_RESULTS['TRAIN_ACCURACY']
-            expected_test_accuracy = GPU_8_BENCHMARK_RESULTS['TEST_ACCURACY']
-    else:
-        expected_training_time = CPU_BENCHMARK_RESULTS['TRAINING_TIME']
-        expected_memory_consumption = CPU_BENCHMARK_RESULTS['MEM_CONSUMPTION']
-        expected_train_accuracy = CPU_BENCHMARK_RESULTS['TRAIN_ACCURACY']
-        expected_test_accuracy = CPU_BENCHMARK_RESULTS['TEST_ACCURACY']
-
-    # Validate Results
-    assert_true(actual_training_time < expected_training_time,'{0} on {1} machine with {2} GPU usage FAILED. Expected Training Time - {3} secs but was {4} secs.'.format(model, MACHINE_TYPE, GPU_NUM, expected_training_time, actual_training_time))
-    assert_true(actual_memory_consumption < expected_memory_consumption, '{0} on {1} machine with {2} GPU usage FAILED. Expected Mem Consumption - {3} MB but was {4} MB.'.format(model, MACHINE_TYPE, GPU_NUM, expected_memory_consumption, actual_memory_consumption))
-    assert_true(actual_train_accuracy > expected_train_accuracy, '{0} on {1} machine with {2} GPU usage FAILED. Expected Train Accuracy - {3} but was {4}.'.format(model, MACHINE_TYPE, GPU_NUM, expected_train_accuracy, actual_train_accuracy))
-    assert_true(actual_test_accuracy > expected_test_accuracy, '{0} on {1} machine with {2} GPU usage FAILED. Expected Test Accuracy - {3} but was {4}.'.format(model, MACHINE_TYPE, GPU_NUM, expected_test_accuracy, actual_test_accuracy))
diff --git a/tests/nightly/mxnet_keras_integration_tests/model_util.py b/tests/nightly/mxnet_keras_integration_tests/model_util.py
deleted file mode 100644
index bb9d6374af8..00000000000
--- a/tests/nightly/mxnet_keras_integration_tests/model_util.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-import os
-from keras import backend as K
-from keras.models import Model
-from keras.layers import Input, merge
-from keras.layers.core import Lambda
-
-# Before running the integration tests, users are expected to set these
-# environment variables.
-IS_GPU = (os.environ['MXNET_KERAS_TEST_MACHINE'] == 'GPU')
-GPU_NUM = int(os.environ['GPU_NUM']) if IS_GPU else 0
-KERAS_BACKEND = os.environ['KERAS_BACKEND']
-
-def slice_batch(x, n_gpus, part):
-    sh = K.shape(x)
-    L = sh[0] / n_gpus
-    if part == n_gpus - 1:
-        return x[part*L:]
-    return x[part*L:(part+1)*L]
-
-def prepare_gpu_model(model, **kwargs):
-    gpu_list = []
-    for i in range(GPU_NUM):
-        gpu_list.append('gpu(%d)' % i)
-    if KERAS_BACKEND == 'mxnet':
-        kwargs['context'] = gpu_list
-        model.compile(**kwargs)
-    else:
-        model.compile(**kwargs)
-
-def prepare_cpu_model(model, **kwargs):
-    model.compile(**kwargs)
-
-def make_model(model, **kwargs):
-    """
-        Compiles the Keras Model object for given backend type and machine type.
-        Use this function to write one Keras code and run it across different machine type.
-
-        If environment variable - MXNET_KERAS_TEST_MACHINE is set to CPU, then Compiles
-        Keras Model for running on CPU.
-
-        If environment variable - MXNET_KERAS_TEST_MACHINE is set to GPU, then Compiles
-        Keras Model running on GPU using number of GPUs equal to number specified in
-        GPU_NUM environment variable.
-
-        Currently supports only MXNet as Keras backend.
-    """
-    if(IS_GPU):
-        prepare_gpu_model(model, **kwargs)
-    else:
-        prepare_cpu_model(model, **kwargs)
-    return model
diff --git a/tests/nightly/mxnet_keras_integration_tests/profiler.py b/tests/nightly/mxnet_keras_integration_tests/profiler.py
deleted file mode 100644
index b0d39e19aa0..00000000000
--- a/tests/nightly/mxnet_keras_integration_tests/profiler.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-import os
-import signal
-import time
-import csv
-import subprocess
-from memory_profiler import memory_usage
-
-IS_GPU = (os.environ['MXNET_KERAS_TEST_MACHINE'] == 'GPU')
-GPU_NUM = int(os.environ['GPU_NUM']) if IS_GPU else 0
-
-# This command is useful to fetch GPU memory consumption.
-GPU_MONITOR_CMD = "nvidia-smi --query-gpu=index,memory.used --format=csv -lms 500 -f output.csv"
-
-def cpu_memory_profile(func_to_profile):
-    max_mem_usage = memory_usage(proc=(func_to_profile, ()), max_usage=True)
-    return max_mem_usage[0]
-
-def gpu_mem_profile(file_name):
-    row_count = 0
-    # In MBs
-    max_mem_usage = 0
-    with open(file_name, 'r') as csv_file:
-        csv_reader = csv.reader(csv_file)
-        last_line_broken = False
-        for row in csv_reader:
-            if row_count == 0:
-                row_count += 1
-                continue
-            if len(row) < 2 or not 'MiB' in row[1]:
-                last_line_broken = True
-            row_count += 1
-        row_count -= 1
-        if row_count % GPU_NUM == 0 and last_line_broken:
-            row_count -= GPU_NUM
-        else:
-            row_count -= row_count % GPU_NUM
-
-    with open(file_name, 'r') as csv_file:
-        csv_reader = csv.reader(csv_file)
-        current_usage = 0
-        mem_recoder = [0] * GPU_NUM
-        row_num = 0
-        for row in csv_reader:
-            if row_num == 0:
-                row_num += 1
-                continue
-            mem_str = row[1].lstrip().rstrip()[:-4]
-            mem_num = float(mem_str)
-            current_usage += mem_num
-            mem_recoder[(row_num - 1) % GPU_NUM] += mem_num
-            if row_num % GPU_NUM == 0:
-                max_mem_usage = max(max_mem_usage, current_usage)
-                current_usage = 0
-            row_num += 1
-            if row_num > row_count:
-                break
-        row_num -= 1
-    os.remove(file_name)
-    return max_mem_usage
-
-def profile(func_to_profile):
-    """
-        This function helps in profile given func_to_profile for run-time and
-        memory consumption.
-
-        Capable of profile for both GPU and CPU machine.
-
-        Uses environment variable - IS_GPU to identify whether to profile for
-        CPU or GPU.
-
-        returns: run_time, memory_usage
-    """
-    run_time = 0; # Seconds
-    memory_usage = 0; # MBs
-
-    # Choose nvidia-smi or memory_profiler for memory profiling for GPU and CPU
-    # machines respectively.
-    if(IS_GPU):
-        # Start time - For timing the runtime
-        start_time = time.time()
-        open('nvidia-smi-output.csv', 'a').close()
-        gpu_monitor_process = subprocess.Popen(GPU_MONITOR_CMD,
-                                                  shell=True, preexec_fn=os.setsid)
-        func_to_profile()
-        end_time = time.time()
-        os.killpg(os.getpgid(gpu_monitor_process.pid), signal.SIGTERM)
-        run_time = end_time - start_time
-        memory_usage = gpu_mem_profile('nvidia-smi-output.csv')
-    else:
-        # Start time - For timing the runtime
-        start_time = time.time()
-        memory_usage = cpu_memory_profile(func_to_profile)
-        end_time = time.time()
-        run_time = end_time - start_time
-
-    return run_time, memory_usage
diff --git a/tests/nightly/mxnet_keras_integration_tests/test_mnist_mlp.py b/tests/nightly/mxnet_keras_integration_tests/test_mnist_mlp.py
deleted file mode 100644
index 89bd2805ce7..00000000000
--- a/tests/nightly/mxnet_keras_integration_tests/test_mnist_mlp.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-'''
-This code is forked from https://github.com/fchollet/keras/blob/master/examples/mnist_mlp.py
-and modified to use as MXNet-Keras integration testing for functionality and sanity performance
-benchmarking.
-
-Trains a simple deep NN on the MNIST dataset.
-
-Gets to 98.40% test accuracy after 20 epochs
-(there is *a lot* of margin for parameter tuning).
-2 seconds per epoch on a K520 GPU.
-'''
-
-from __future__ import print_function
-import numpy as np
-np.random.seed(1337)  # for reproducibility
-
-from os import environ
-
-from keras.datasets import mnist
-from keras.models import Sequential
-from keras.layers.core import Dense, Dropout, Activation
-from keras.optimizers import SGD
-from keras.utils import np_utils
-
-# Imports for benchmarking
-from profiler import profile
-from model_util import make_model
-
-# Imports for assertions
-from assertion_util import assert_results
-
-# Other environment variables
-MACHINE_TYPE = environ['MXNET_KERAS_TEST_MACHINE']
-IS_GPU = (environ['MXNET_KERAS_TEST_MACHINE'] == 'GPU')
-MACHINE_TYPE = 'GPU' if IS_GPU else 'CPU'
-GPU_NUM = int(environ['GPU_NUM']) if IS_GPU else 0
-
-# Expected Benchmark Numbers
-CPU_BENCHMARK_RESULTS = {'TRAINING_TIME':550.0, 'MEM_CONSUMPTION':400.0, 'TRAIN_ACCURACY': 0.85, 'TEST_ACCURACY':0.85}
-GPU_1_BENCHMARK_RESULTS = {'TRAINING_TIME':40.0, 'MEM_CONSUMPTION':200, 'TRAIN_ACCURACY': 0.85, 'TEST_ACCURACY':0.85}
-# TODO: Fix Train and Test accuracy numbers in multiple gpu mode. Setting it to 0 for now to get whole integration set up done
-GPU_2_BENCHMARK_RESULTS = {'TRAINING_TIME':45.0, 'MEM_CONSUMPTION':375, 'TRAIN_ACCURACY': 0.0, 'TEST_ACCURACY':0.0}
-GPU_4_BENCHMARK_RESULTS = {'TRAINING_TIME':55.0, 'MEM_CONSUMPTION':750.0, 'TRAIN_ACCURACY': 0.0, 'TEST_ACCURACY':0.0}
-GPU_8_BENCHMARK_RESULTS = {'TRAINING_TIME':100.0, 'MEM_CONSUMPTION':1800.0, 'TRAIN_ACCURACY': 0.0, 'TEST_ACCURACY':0.0}
-
-# Dictionary to store profiling output
-profile_output = {}
-
-batch_size = 128
-nb_classes = 10
-nb_epoch = 20
-
-# the data, shuffled and split between train and test sets
-(X_train, y_train), (X_test, y_test) = mnist.load_data()
-
-X_train = X_train.reshape(60000, 784)
-X_test = X_test.reshape(10000, 784)
-X_train = X_train.astype('float32')
-X_test = X_test.astype('float32')
-X_train /= 255
-X_test /= 255
-
-# convert class vectors to binary class matrices
-Y_train = np_utils.to_categorical(y_train, nb_classes)
-Y_test = np_utils.to_categorical(y_test, nb_classes)
-
-model = Sequential()
-model.add(Dense(512, input_shape=(784,)))
-model.add(Activation('relu'))
-model.add(Dropout(0.2))
-model.add(Dense(512))
-model.add(Activation('relu'))
-model.add(Dropout(0.2))
-model.add(Dense(10))
-model.add(Activation('softmax'))
-
-model.summary()
-make_model(model, loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy'])
-
-def train_model():
-    history = model.fit(X_train, Y_train,
-                    batch_size=batch_size, nb_epoch=nb_epoch,
-                    verbose=1, validation_data=(X_test, Y_test))
-    profile_output['TRAIN_ACCURACY'] = history.history['acc'][-1]
-
-def test_run():
-    # Calling training and profile memory usage
-    profile_output["MODEL"] = "MNIST MLP"
-    run_time, memory_usage = profile(train_model)
-
-    profile_output['TRAINING_TIME'] = float(run_time)
-    profile_output['MEM_CONSUMPTION'] = float(memory_usage)
-
-    score = model.evaluate(X_test, Y_test, verbose=0)
-    profile_output["TEST_ACCURACY"] = score[1]
-
-    assert_results(MACHINE_TYPE, IS_GPU, GPU_NUM, profile_output, CPU_BENCHMARK_RESULTS, GPU_1_BENCHMARK_RESULTS, GPU_2_BENCHMARK_RESULTS, GPU_4_BENCHMARK_RESULTS, GPU_8_BENCHMARK_RESULTS)
diff --git a/tests/nightly/test_mxnet_keras_integration_cpu.sh b/tests/nightly/test_mxnet_keras_integration_cpu.sh
deleted file mode 100755
index 95cc0d0760e..00000000000
--- a/tests/nightly/test_mxnet_keras_integration_cpu.sh
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/bin/sh
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-set -e
-### Build MXNet with CPU support
-echo "BUILD make"
-cp ./make/config.mk .
-echo "USE_CUDA=0" >> ./config.mk
-echo "USE_CUDNN=0" >> ./config.mk
-echo "USE_BLAS=openblas" >> ./config.mk
-echo "ADD_CFLAGS += -I/usr/include/openblas" >> ./config.mk
-echo "GTEST_PATH=/usr/local/gtest" >> ./config.mk
-echo 'export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig:$PKG_CONFIG_PATH' >> ~/.profile
-echo 'export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH' >> ~/.profile
-echo 'export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.111-1.b15.25.amzn1.x86_64' >> ~/.profile
-echo 'export JRE_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.111-1.b15.25.amzn1.x86_64/jre' >> ~/.profile
-echo 'export PATH=$PATH:/apache-maven-3.3.9/bin/:/usr/bin:/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.111-1.b15.25.amzn1.x86_64/bin:/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.111-1.b15.25.amzn1.x86_64/jre/bin' >> ~/.profile
-source ~/.profile
-make clean
-make -j 4 || exit -1
-
-echo "BUILD python2 mxnet"
-cd ./python
-python setup.py install || exit 1
-
-echo "BUILD python3 mxnet"
-python3 setup.py install || exit 1
-
-# Come out of Mxnet directory.
-cd ..
-
-# Required for Keras installation
-pip install pyyaml
-
-# If already exist remove and fork DMLC/keras and install.
-# Note: This should eventually be replaced with pip install when mxnet backend is part of fchollet/keras
-
-########### Set up Keras ####################
-echo "Installing Keras. This can take few minutes..."
-# Clone keras repository from dmlc. This has mxnet backend implementated.
-if [ -d "keras" ]; then
-  rm -rf keras/
-fi
-
-git clone https://github.com/dmlc/keras.git --recursive
-cd keras
-python setup.py install
-
-########### Set up packages for profiling #########
-echo "Installing memory_profile and psutil for profiling. This can take few minutes..."
-pip install memory_profiler
-pip install psutil
-
-########## Set Environment Variables ########
-echo "Setting Environment Variables for MXNet Keras Integration Tests on CPU machine"
-export KERAS_BACKEND="mxnet"
-export MXNET_KERAS_TEST_MACHINE='CPU'
-
-########## Call the test script ############
-cd ../../mxnet/tests/nightly
-echo "Running MXNet Keras Integration Test on CPU machine"
-nosetests --with-xunit --quiet --nologcapture mxnet_keras_integration_tests/
diff --git a/tests/nightly/test_mxnet_keras_integration_gpu.sh b/tests/nightly/test_mxnet_keras_integration_gpu.sh
deleted file mode 100755
index 5d541fa5b7a..00000000000
--- a/tests/nightly/test_mxnet_keras_integration_gpu.sh
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/bin/sh
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-set -e
-
-### Install git
-apt-get update
-apt-get install git-all
-
-### Build MXNet with CPU support
-echo "BUILD make"
-cp ./make/config.mk .
-echo "USE_CUDA=1" >> ./config.mk
-echo "USE_CUDA_PATH=/usr/local/cuda" >> config.mk
-echo "USE_CUDNN=1" >> ./config.mk
-echo "USE_BLAS=openblas" >> ./config.mk
-echo "ADD_CFLAGS += -I/usr/include/openblas" >> ./config.mk
-echo "GTEST_PATH=/usr/local/gtest" >> ./config.mk
-export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig:$PKG_CONFIG_PATH
-export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH
-export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.111-1.b15.25.amzn1.x86_64
-export JRE_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.111-1.b15.25.amzn1.x86_64/jre
-export PATH=$PATH:/apache-maven-3.3.9/bin/:/usr/bin:/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.111-1.b15.25.amzn1.x86_64/bin:/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.111-1.b15.25.amzn1.x86_64/jre/bin
-
-make clean
-make -j 4 || exit -1
-
-echo "BUILD python2 mxnet"
-cd ./python
-python setup.py install || exit 1
-
-echo "BUILD python3 mxnet"
-python3 setup.py install || exit 1
-
-# Come out of MXNet directory
-cd ..
-
-# Dependencies required for Keras installation
-pip install pyyaml
-
-pip install --upgrade pip
-pip install --upgrade six
-
-# If already exist remove and fork DMLC/keras and install.
-# Note: This should eventually be replaced with pip install when mxnet backend is part of fchollet/keras
-
-########### Set up Keras ####################
-echo "Installing Keras. This can take few minutes..."
-# Clone keras repository from dmlc. This has mxnet backend implementated.
-if [ -d "keras" ]; then
-  rm -rf keras/
-fi
-
-git clone https://github.com/dmlc/keras.git --recursive
-cd keras
-python setup.py install
-
-########### Set up packages for profiling #########
-echo "Installing memory_profile and psutil for profiling. This can take few minutes..."
-pip install memory_profiler
-pip install psutil
-
-########## Set Environment Variables ########
-echo "Setting Environment Variables for MXNet Keras Integration Tests on CPU machine"
-cd ../../mxnet/tests/nightly
-
-export KERAS_BACKEND="mxnet"
-export MXNET_KERAS_TEST_MACHINE='GPU'
-########## Call the test script with 1 GPUS ############
-
-export GPU_NUM='1'
-echo "Running MXNet Keras Integration Test on GPU machine with 1 GPUs"
-nosetests --with-xunit --quiet --nologcapture mxnet_keras_integration_tests/
-
-########## Call the test script with 2 GPUS ############
-
-export GPU_NUM='2'
-echo "Running MXNet Keras Integration Test on GPU machine with 2 GPUs"
-nosetests --with-xunit --quiet --nologcapture mxnet_keras_integration_tests/
-
-########## Call the test script with 4 GPUS ############
-
-export GPU_NUM='4'
-echo "Running MXNet Keras Integration Test on GPU machine with 4 GPUs"
-nosetests --with-xunit --quiet --nologcapture mxnet_keras_integration_tests/
-
-########## Call the test script with 8 GPUS ############
-
-export GPU_NUM='8'
-echo "Running MXNet Keras Integration Test on GPU machine with 8 GPUs"
-nosetests --with-xunit --quiet --nologcapture mxnet_keras_integration_tests/
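
For reference, every test removed here followed the same harness pattern: compile the model through make_model, run training under profile, and check the results with assert_results. Below is a minimal sketch of that pattern, distilled from the deleted test_mnist_mlp.py above. It is illustrative only: it assumes the deleted helper modules are importable, the environment variables they read are set, and it substitutes toy data and loose thresholds for the real MNIST benchmark dictionaries shown in the diff.

    # Sketch only; assumes the (now deleted) profiler, model_util and assertion_util
    # modules are on the path and that MXNET_KERAS_TEST_MACHINE, GPU_NUM and
    # KERAS_BACKEND are set as described above.
    from os import environ

    import numpy as np
    from keras.models import Sequential
    from keras.layers.core import Dense, Activation
    from keras.optimizers import SGD
    from keras.utils import np_utils

    from profiler import profile              # deleted in this PR
    from model_util import make_model         # deleted in this PR
    from assertion_util import assert_results # deleted in this PR

    IS_GPU = environ['MXNET_KERAS_TEST_MACHINE'] == 'GPU'
    MACHINE_TYPE = 'GPU' if IS_GPU else 'CPU'
    GPU_NUM = int(environ['GPU_NUM']) if IS_GPU else 0

    # Illustrative thresholds; the real tests used the MNIST benchmark dictionaries above.
    LOOSE = {'TRAINING_TIME': 600.0, 'MEM_CONSUMPTION': 4000.0,
             'TRAIN_ACCURACY': 0.0, 'TEST_ACCURACY': 0.0}
    CPU_BENCHMARK_RESULTS = GPU_1_BENCHMARK_RESULTS = GPU_2_BENCHMARK_RESULTS = LOOSE
    GPU_4_BENCHMARK_RESULTS = GPU_8_BENCHMARK_RESULTS = LOOSE

    # Toy data and model stand in for the real MNIST MLP.
    X = np.random.rand(256, 784).astype('float32')
    Y = np_utils.to_categorical(np.random.randint(0, 10, 256), 10)

    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    # make_model compiles for CPU or for GPU_NUM GPUs depending on the environment.
    make_model(model, loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy'])

    profile_output = {'MODEL': 'toy example'}

    def train_model():
        history = model.fit(X, Y, batch_size=128, nb_epoch=2, verbose=0)
        profile_output['TRAIN_ACCURACY'] = history.history['acc'][-1]

    # profile() returns (run time in seconds, memory usage in MB), using nvidia-smi
    # on GPU machines and memory_profiler on CPU machines.
    run_time, memory_usage = profile(train_model)
    profile_output['TRAINING_TIME'] = float(run_time)
    profile_output['MEM_CONSUMPTION'] = float(memory_usage)
    profile_output['TEST_ACCURACY'] = model.evaluate(X, Y, verbose=0)[1]

    # Compare against the thresholds for this machine type / GPU count.
    assert_results(MACHINE_TYPE, IS_GPU, GPU_NUM, profile_output,
                   CPU_BENCHMARK_RESULTS, GPU_1_BENCHMARK_RESULTS,
                   GPU_2_BENCHMARK_RESULTS, GPU_4_BENCHMARK_RESULTS,
                   GPU_8_BENCHMARK_RESULTS)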


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services