Posted to dev@submarine.apache.org by pi...@apache.org on 2022/02/22 03:11:56 UTC

[submarine] branch master updated: SUBMARINE-1196. Remove administrator docs on the submarine website

This is an automated email from the ASF dual-hosted git repository.

pingsutw pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/submarine.git


The following commit(s) were added to refs/heads/master by this push:
     new 61851bc  SUBMARINE-1196. Remove administrator docs on the submarine website
61851bc is described below

commit 61851bc5a9f890accd2015ab6878cec8372c827d
Author: MortalHappiness <b0...@ntu.edu.tw>
AuthorDate: Mon Feb 21 14:31:42 2022 +0800

    SUBMARINE-1196. Remove administrator docs on the submarine website
    
    ### What is this PR for?
    Currently, the administrator docs on the submarine website only contain YARN-related docs. Therefore they can be removed.
    
    ### What type of PR is it?
    [Improvement]
    
    ### Todos
    
    ### What is the Jira issue?
    https://issues.apache.org/jira/browse/SUBMARINE-1196
    
    ### How should this be tested?
    
    ### Screenshots (if appropriate)
    
    ### Questions:
    * Do the license files need updating? No
    * Are there breaking changes for older versions? No
    * Does this need new documentation? No
    
    Author: MortalHappiness <b0...@ntu.edu.tw>
    
    Signed-off-by: Kevin <pi...@apache.org>
    
    Closes #883 from MortalHappiness/SUBMARINE-1196 and squashes the following commits:
    
    40ea6dbf [MortalHappiness] Fix broken links
    496ca336 [MortalHappiness] SUBMARINE-1196. Remove administrator docs on the submarine website
---
 website/docs/adminDocs/yarn/README.md              |  39 --
 .../docs/adminDocs/yarn/TestAndTroubleshooting.md  | 151 -----
 website/docs/adminDocs/yarn/workbench/HowToRun.md  | 103 ----
 website/docs/adminDocs/yarn/workbench/README.md    | 177 ------
 .../docs/adminDocs/yarn/workbench/README.zh-CN.md  | 176 ------
 .../adminDocs/yarn/workbench/notebook/jupyter.yaml |  60 --
 .../yarn/workbench/notebook/setup-jupyter.md       |  73 ---
 .../kaldi/RunningDistributedThchs30KaldiJobs.md    | 678 ---------------------
 .../docs/ecosystem/kaldi/WriteDockerfileKaldi.md   | 112 ----
 .../base/ubuntu-18.04/Dockerfile.gpu.kaldi_latest  |  74 ---
 website/docs/ecosystem/kaldi/build-all.sh          |  26 -
 website/docs/ecosystem/kaldi/sge/gencfs.sh         | 154 -----
 website/docs/ecosystem/kaldi/sge/group             |  66 --
 website/docs/ecosystem/kaldi/sge/passwd            |  42 --
 website/docs/ecosystem/kaldi/sge/resolv.conf       |  23 -
 website/docs/ecosystem/kaldi/sge/sge_run.sh        |  84 ---
 website/docs/userDocs/yarn/Dockerfiles.md          |  24 -
 .../docs/userDocs/yarn/TestAndTroubleshooting.md   | 151 -----
 website/docs/userDocs/yarn/WriteDockerfileMX.md    |  90 ---
 website/docs/userDocs/yarn/WriteDockerfilePT.md    | 114 ----
 website/docs/userDocs/yarn/WriteDockerfileTF.md    | 123 ----
 website/docs/userDocs/yarn/YARNRuntimeGuide.md     | 312 ----------
 .../base/ubuntu-18.04/Dockerfile.cpu.mx_latest     |  49 --
 .../base/ubuntu-18.04/Dockerfile.gpu.mx_latest     |  49 --
 .../docs/userDocs/yarn/docker/mxnet/build-all.sh   |  25 -
 .../mxnet/cifar10/Dockerfile.cifar10.mx_1.5.1      |  62 --
 .../ubuntu-18.04/Dockerfile.gpu.pytorch_latest     |  77 ---
 .../docs/userDocs/yarn/docker/pytorch/build-all.sh |  30 -
 .../with-cifar10-models/cifar10_tutorial.py        | 348 -----------
 .../ubuntu-18.04/Dockerfile.gpu.pytorch_latest     |  21 -
 .../base/ubuntu-18.04/Dockerfile.cpu.tf_1.13.1     |  75 ---
 .../base/ubuntu-18.04/Dockerfile.gpu.tf_1.13.1     |  89 ---
 .../userDocs/yarn/docker/tensorflow/build-all.sh   |  35 --
 .../mnist/Dockerfile.tony.tf.mnist.tf_1.13.1       |  72 ---
 .../ubuntu-18.04/Dockerfile.cpu.tf_1.13.1          |  22 -
 .../ubuntu-18.04/Dockerfile.gpu.tf_1.13.1          |  22 -
 .../cifar10_estimator_tf_1.13.1/README.md          | 542 ----------------
 .../cifar10_estimator_tf_1.13.1/cifar10.py         | 112 ----
 .../cifar10_estimator_tf_1.13.1/cifar10_main.py    | 519 ----------------
 .../cifar10_estimator_tf_1.13.1/cifar10_model.py   |  76 ---
 .../cifar10_estimator_tf_1.13.1/cifar10_utils.py   | 156 -----
 .../generate_cifar10_tfrecords.py                  | 115 ----
 .../cifar10_estimator_tf_1.13.1/model_base.py      | 207 -------
 .../zeppelin-notebook-example/Dockerfile.gpu       |  75 ---
 .../zeppelin-notebook-example/run_container.sh     |  22 -
 .../tensorflow/zeppelin-notebook-example/shiro.ini | 120 ----
 .../zeppelin-notebook-example/zeppelin-site.xml    | 569 -----------------
 website/sidebars.js                                |   5 -
 48 files changed, 6346 deletions(-)

diff --git a/website/docs/adminDocs/yarn/README.md b/website/docs/adminDocs/yarn/README.md
deleted file mode 100644
index cb5932c..0000000
--- a/website/docs/adminDocs/yarn/README.md
+++ /dev/null
@@ -1,39 +0,0 @@
----
-title: Running Submarine on YARN (deprecated)
----
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-   http://www.apache.org/licenses/LICENSE-2.0
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-Submarine for YARN supports the TensorFlow, PyTorch and MXNet frameworks, leveraging [TonY](https://github.com/linkedin/TonY) (created by LinkedIn) to run deep learning training jobs on YARN.
-
-Submarine also supports the GPU-on-YARN and Docker-on-YARN features.
-
-Submarine can run on Hadoop 2.7.3 or later. If the GPU-on-YARN or Docker-on-YARN feature is needed, a newer Hadoop version is required; please refer to the next section about which Hadoop version to choose.
-
-## Hadoop version
-
-Must:
-
-- Apache Hadoop version newer than 2.7.3
-
-Optional:
-
-- When you want to use the GPU-on-YARN feature with Submarine, please make sure Hadoop is at least 2.10.0 (or 3.1.0+), and follow [Enable GPU on YARN 2.10.0+](https://hadoop.apache.org/docs/r2.10.0/hadoop-yarn/hadoop-yarn-site/UsingGpus.html) to enable the GPU-on-YARN feature.
-- When you want to run training jobs with Docker containers, please make sure Hadoop is at least 2.8.2, and follow [Enable Docker on YARN 2.8.2+](https://hadoop.apache.org/docs/r2.8.2/hadoop-yarn/hadoop-yarn-site/DockerContainers.html) to enable the Docker-on-YARN feature (a minimal config sketch follows this list).
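-
-For reference, a minimal `yarn-site.xml` sketch with the kind of properties those guides describe (property names taken from the linked Hadoop documentation; this is illustrative only, follow the official guides for a complete setup):
-
-```xml
-<configuration>
-  <!-- GPU-on-YARN: enable the GPU resource plugin on the NodeManager -->
-  <property>
-    <name>yarn.nodemanager.resource-plugins</name>
-    <value>yarn.io/gpu</value>
-  </property>
-  <!-- Docker-on-YARN: allow the docker runtime for Linux containers -->
-  <property>
-    <name>yarn.nodemanager.runtime.linux.allowed-runtimes</name>
-    <value>default,docker</value>
-  </property>
-</configuration>
-```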
-
-## Submarine YARN Runtime Guide
-
-The [YARN Runtime Guide](../../userDocs/yarn/YARNRuntimeGuide) describes how to use Submarine to run jobs on YARN, with or without Docker.
diff --git a/website/docs/adminDocs/yarn/TestAndTroubleshooting.md b/website/docs/adminDocs/yarn/TestAndTroubleshooting.md
deleted file mode 100644
index 8b6ed5c..0000000
--- a/website/docs/adminDocs/yarn/TestAndTroubleshooting.md
+++ /dev/null
@@ -1,151 +0,0 @@
----
-title: Test and Troubleshooting
----
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-   http://www.apache.org/licenses/LICENSE-2.0
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-## Test with a TensorFlow job
-
-Distributed-shell + GPU + cgroup
-
-```bash
- ... \
- job run \
- --env DOCKER_JAVA_HOME=/opt/java \
- --env DOCKER_HADOOP_HDFS_HOME=/hadoop-current --name distributed-tf-gpu \
- --env YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_NETWORK=calico-network \
- --worker_docker_image tf-1.13.1-gpu:0.0.1 \
- --ps_docker_image tf-1.13.1-cpu:0.0.1 \
- --input_path hdfs://${dfs_name_service}/tmp/cifar-10-data \
- --checkpoint_path hdfs://${dfs_name_service}/user/hadoop/tf-distributed-checkpoint \
- --num_ps 0 \
- --ps_resources memory=4G,vcores=2,gpu=0 \
- --ps_launch_cmd "python /test/cifar10_estimator/cifar10_main.py --data-dir=hdfs://${dfs_name_service}/tmp/cifar-10-data --job-dir=hdfs://${dfs_name_service}/tmp/cifar-10-jobdir --num-gpus=0" \
- --worker_resources memory=4G,vcores=2,gpu=1 --verbose \
- --num_workers 1 \
- --worker_launch_cmd "python /test/cifar10_estimator/cifar10_main.py --data-dir=hdfs://${dfs_name_service}/tmp/cifar-10-data --job-dir=hdfs://${dfs_name_service}/tmp/cifar-10-jobdir --train-steps=500 --eval-batch-size=16 --train-batch-size=16 --sync --num-gpus=1"
-```
-
-
-
-## Issues:
-
-### Issue 1: Fail to start nodemanager after system reboot
-
-```
-2018-09-20 18:54:39,785 ERROR org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor: Failed to bootstrap configured resource subsystems!
-org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException: Unexpected: Cannot create yarn cgroup Subsystem:cpu Mount points:/proc/mounts User:yarn Path:/sys/fs/cgroup/cpu,cpuacct/hadoop-yarn
-  at org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandlerImpl.initializePreMountedCGroupController(CGroupsHandlerImpl.java:425)
-  at org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandlerImpl.initializeCGroupController(CGroupsHandlerImpl.java:377)
-  at org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsCpuResourceHandlerImpl.bootstrap(CGroupsCpuResourceHandlerImpl.java:98)
-  at org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsCpuResourceHandlerImpl.bootstrap(CGroupsCpuResourceHandlerImpl.java:87)
-  at org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerChain.bootstrap(ResourceHandlerChain.java:58)
-  at org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor.init(LinuxContainerExecutor.java:320)
-  at org.apache.hadoop.yarn.server.nodemanager.NodeManager.serviceInit(NodeManager.java:389)
-  at org.apache.hadoop.service.AbstractService.init(AbstractService.java:164)
-  at org.apache.hadoop.yarn.server.nodemanager.NodeManager.initAndStartNodeManager(NodeManager.java:929)
-  at org.apache.hadoop.yarn.server.nodemanager.NodeManager.main(NodeManager.java:997)
-2018-09-20 18:54:39,789 INFO org.apache.hadoop.service.AbstractService: Service NodeManager failed in state INITED
-```
-
-Solution: Grant the user yarn access to `/sys/fs/cgroup/cpu,cpuacct`, which is a subfolder of the cgroup mount destination.
-
-```
-chown :yarn -R /sys/fs/cgroup/cpu,cpuacct
-chmod g+rwx -R /sys/fs/cgroup/cpu,cpuacct
-```
-
-If GPUs are used, access to the cgroup devices folder is needed as well:
-
-```
-chown :yarn -R /sys/fs/cgroup/devices
-chmod g+rwx -R /sys/fs/cgroup/devices
-```
-
-
-### Issue 2: container-executor permission denied
-
-```
-2018-09-21 09:36:26,102 WARN org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor: IOException executing command:
-java.io.IOException: Cannot run program "/etc/yarn/sbin/Linux-amd64-64/container-executor": error=13, Permission denied
-        at java.lang.ProcessBuilder.start(ProcessBuilder.java:1048)
-        at org.apache.hadoop.util.Shell.runCommand(Shell.java:938)
-        at org.apache.hadoop.util.Shell.run(Shell.java:901)
-        at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1213)
-```
-
-Solution: The permission of `/etc/yarn/sbin/Linux-amd64-64/container-executor` should be 6050.
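-
-A sketch of how to apply that (the root:hadoop owner/group here is an assumption; use the owner and group your NodeManager configuration expects):
-
-```bash
-# setuid/setgid binary owned by root; group may execute, others may not
-chown root:hadoop /etc/yarn/sbin/Linux-amd64-64/container-executor
-chmod 6050 /etc/yarn/sbin/Linux-amd64-64/container-executor
-```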
-
-### Issue 3: How to get the Docker service log
-
-Solution: we can get the Docker service log with the following command:
-
-```
-journalctl -u docker
-```
-
-### Issue 4: Docker can't remove containers, with errors like `device or resource busy`
-
-```bash
-$ docker rm 0bfafa146431
-Error response from daemon: Unable to remove filesystem for 0bfafa146431771f6024dcb9775ef47f170edb2f1852f71916ba44209ca6120a: remove /app/docker/containers/0bfafa146431771f6024dcb9775ef47f170edb2f152f71916ba44209ca6120a/shm: device or resource busy
-```
-
-Solution: to find which process causes the `device or resource busy` error, we can add a shell script named `find-busy-mnt.sh`:
-
-```bash
-#!/usr/bin/env bash
-
-# A simple script to get information about mount points and pids and their
-# mount namespaces.
-
-if [ $# -ne 1 ]; then
-  echo "Usage: $0 <devicemapper-device-id>"
-  exit 1
-fi
-
-ID=$1
-
-MOUNTS=`find /proc/*/mounts | xargs grep $ID 2>/dev/null`
-
-[ -z "$MOUNTS" ] && echo "No pids found" && exit 0
-
-printf "PID\tNAME\t\tMNTNS\n"
-echo "$MOUNTS" | while read LINE; do
-  PID=`echo $LINE | cut -d ":" -f1 | cut -d "/" -f3`
-  # Ignore self and thread-self
-  if [ "$PID" == "self" ] || [ "$PID" == "thread-self" ]; then
-    continue
-  fi
-  NAME=`ps -q $PID -o comm=`
-  MNTNS=`readlink /proc/$PID/ns/mnt`
-  printf "%s\t%s\t\t%s\n" "$PID" "$NAME" "$MNTNS"
-done
-```
-
-Kill the process by the PID found by the script:
-
-```bash
-$ chmod +x find-busy-mnt.sh
-./find-busy-mnt.sh 0bfafa146431771f6024dcb9775ef47f170edb2f152f71916ba44209ca6120a
-# PID   NAME            MNTNS
-# 5007  ntpd            mnt:[4026533598]
-$ kill -9 5007
-```
-
-### Issue 5: YARN failed to start containers
-
-If the number of GPUs required by applications is larger than the number of GPUs in the cluster, some containers cannot be created.
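-
-If this happens, one mitigation is to make the total GPU request fit the cluster, for example by lowering the worker count or the per-worker GPU count in the job submission (a hypothetical adjustment of the flags used in the test job above):
-
-```bash
- ... \
- job run \
- --num_workers 1 \
- --worker_resources memory=4G,vcores=2,gpu=1 \
- ...
-```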
diff --git a/website/docs/adminDocs/yarn/workbench/HowToRun.md b/website/docs/adminDocs/yarn/workbench/HowToRun.md
deleted file mode 100644
index cd76f39..0000000
--- a/website/docs/adminDocs/yarn/workbench/HowToRun.md
+++ /dev/null
@@ -1,103 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-   http://www.apache.org/licenses/LICENSE-2.0
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-# How To Run Submarine Workbench
-We provide two methods to launch Submarine Workbench:
-*  Method 1:  Run Submarine Workbench on docker
-*  Method 2:  Run Submarine Workbench without docker
-
-# Run Submarine Workbench on docker
-
-By using the official Submarine images, only a few Docker commands are required to launch **Submarine Workbench**. This document describes how to launch the Submarine Workbench via the new Docker images and how to switch between the different Submarine Workbench versions (Vue and Angular).
-
-### Two versions of Submarine Workbench
-1. Angular (default)
-2. Vue (the old version; it will be replaced by the Angular version in the future)
-#### (WARNING: Please open a new **incognito window** when you switch between different versions of Submarine Workbench)
-### Launch the Submarine Workbench (Angular)
-* Note that since Submarine Workbench depends on the Submarine database, you need to run the Docker container of the Submarine database first.
-```
-docker run -it -p 3306:3306 -d --name submarine-database -e MYSQL_ROOT_PASSWORD=password apache/submarine:database-<REPLACE_VERSION>
-docker run -it -p 8080:8080 -d --link=submarine-database:submarine-database --name submarine-server apache/submarine:server-<REPLACE_VERSION>
-```
-* The login page of Submarine Workbench will be shown at ```http://127.0.0.1:8080```.
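-* To quickly verify that the server is listening before opening the browser, you can, for example, run:
-```
-curl -I http://127.0.0.1:8080
-```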
-### Check the data in the submarine-database
-*  Step 1: Enter the submarine-database container
-```
-docker exec -it submarine-database bash
-```
-*  Step 2: Enter the MySQL database
-```
-mysql -uroot -ppassword
-```
-*  Step 3: List the data in the table
-```
-# list all databases
-show databases;
-
-# choose a database
-use ${target_database};
-
-# list all tables
-show tables;
-
-# list the data in the table
-select * from ${target_table};
-```
-# Run Submarine Workbench without docker
-### Run Submarine Workbench
-
-```
-cd submarine
-./bin/submarine-daemon.sh [start|stop|restart]
-```
-To start the workbench server for the first time, you need to download the MySQL JDBC jar
-and put it in the workbench/lib path. Alternatively, you can add the getMysqlJar parameter
-to download the MySQL jar automatically.
-```
-cd submarine
-./bin/submarine-daemon.sh start getMysqlJar
-```
-
-### submarine-env.sh
-
-`submarine-env.sh` is automatically executed each time the `submarine-daemon.sh` script runs, so it can be used to set environment variables for the `submarine-daemon.sh` script and for the `SubmarineServer` process (see the example sketch after the table below).
-
-| Name                | Variable                                                     |
-| ------------------- | ------------------------------------------------------------ |
-| JAVA_HOME           | Set your java home path, default is `java`.                  |
-| SUBMARINE_JAVA_OPTS | Set the JAVA OPTS parameters when the Submarine Workbench process starts. If you need to debug the Submarine Workbench process, you can set it to `-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005` |
-| SUBMARINE_MEM       | Set the java memory parameter when the Submarine Workbench process starts. |
-| MYSQL_JAR_URL       | The customized URL to download MySQL jdbc jar.               |
-| MYSQL_VERSION       | The version of the MySQL jdbc jar to download. The default value is 5.1.39. It's used to generate the default value of MYSQL_JDBC_URL |
-
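-A hypothetical `submarine-env.sh` sketch using the variables from the table above (the exact value formats, in particular SUBMARINE_MEM, are assumptions):
-
-```bash
-# example only; adjust paths and values for your deployment
-export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
-export SUBMARINE_JAVA_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005"
-export SUBMARINE_MEM="-Xms1024m -Xmx1024m"
-export MYSQL_VERSION=5.1.39
-```
-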
-### submarine-site.xml
-
-`submarine-site.xml` is the configuration file for running the entire `Submarine` system (an example sketch follows the table below).
-
-| Name                               | Variable                                                     |
-| ---------------------------------- | ------------------------------------------------------------ |
-| submarine.server.addr              | Submarine server address, default is `0.0.0.0`               |
-| submarine.server.port              | Submarine server port, default `8080`                        |
-| submarine.ssl                      | Should SSL be used by the Submarine servers? Default `false` |
-| submarine.server.ssl.port          | Server SSL port (used when the ssl property is set to true), default `8483` |
-| submarine.ssl.client.auth          | Should client authentication be used for SSL connections?    |
-| submarine.ssl.keystore.path        | Path to keystore relative to Submarine configuration directory |
-| submarine.ssl.keystore.type        | The format of the given keystore (e.g. JKS or PKCS12)        |
-| submarine.ssl.keystore.password    | Keystore password. Can be obfuscated by the Jetty Password tool |
-| submarine.ssl.key.manager.password | Key Manager password. Defaults to keystore password. Can be obfuscated. |
-| submarine.ssl.truststore.path      | Path to truststore relative to Submarine configuration directory. Defaults to the keystore path |
-| submarine.ssl.truststore.type      | The format of the given truststore (e.g. JKS or PKCS12). Defaults to the same type as the keystore type |
-| submarine.ssl.truststore.password  | Truststore password. Can be obfuscated by the Jetty Password tool. Defaults to the keystore password |
-| workbench.web.war                  | Submarine Workbench web war file path.                       |
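-
-A hypothetical `submarine-site.xml` sketch using the property names from the table above (values are illustrative only):
-
-```xml
-<configuration>
-  <property>
-    <name>submarine.server.addr</name>
-    <value>0.0.0.0</value>
-  </property>
-  <property>
-    <name>submarine.server.port</name>
-    <value>8080</value>
-  </property>
-</configuration>
-```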
diff --git a/website/docs/adminDocs/yarn/workbench/README.md b/website/docs/adminDocs/yarn/workbench/README.md
deleted file mode 100644
index 6d2730d..0000000
--- a/website/docs/adminDocs/yarn/workbench/README.md
+++ /dev/null
@@ -1,177 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-   http://www.apache.org/licenses/LICENSE-2.0
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-English | [简体中文](./README.zh-CN.md)
-
-# Submarine Workbench Introduction
-
-Submarine Workbench is a WEB system for data scientists.
-Data scientists can interactively access notebooks, submit/manage jobs, manage models, create model training workflows, access data sets, and more through Submarine Workbench.
-
-## Register
-
-Everyone who needs to use Submarine for machine learning algorithm development can open the Submarine Workbench WEB homepage. On the homepage, click the registration link and fill in a user name, email address and password to register. At this point the user is in the `waiting for approval` state.
-
-After receiving the user's registration request in Submarine Workbench, the administrator sets the operation permissions according to the user's needs, sets the user's organization, allocates resources, and sets the user status to `pass the audit`. The user can then log in to the Submarine Workbench. Different users have different permissions.
-
-## Login
-
-Each Submarine user logs in to the Home page of Submarine Workbench by entering their username and password on the Login page.
-
-## Home
-
-In the Submarine Workbench Home page, the top level shows the user's resource usage and task execution through four charts.
-
-In the `Quick Start` list, the most commonly used feature links in the Workbench are displayed so that users can work quickly.
-
-In the `Open Recent` list, the nine projects that the user has used most recently are displayed, so you can resume work quickly.
-
-In the `What's New?` list, some of the latest features and project information released by Submarine are displayed to help you follow the latest developments in the Submarine project.
-
-## Workspace
-
-Workspace consists primarily of five tab pages; the total number of items is shown in each tab page's title.
-
-### Project
-
-In the Project page, all the projects created by the user themselves are displayed as cards.
-
-![image-20191007161424534](/img/workspace/workspace-project.png)
-
-Each Project card consists of the following sections:
-
-1. **Project Type**: Submarine currently supports six types of machine learning algorithm frameworks and development languages: `Notebook`, `Python`, `R`, `Scala`, `Tensorflow`, and `PyTorch`, which are identified by corresponding icons in the project card.
-2. **Project Tags**: Users can tag each Project with different tags for easy searching and management.
-3. **Github/Gitlab integrated**: Submarine Workbench is integrated with `Github`/`Gitlab`, and each Project can perform `Watch`, `Star`, `Fork`, and `Comment` operations in Workbench.
-   + **Watch**: [TODO]
-   + **Star**: [TODO]
-   + **Fork**: [TODO]
-   + **Comment**: Users can comment on the project.
-4. **Edit**: Users can open projects in **Notebook** and perform algorithm development by double-clicking on the project or by clicking the **Edit** button.
-5. **Download**: The user downloads the project package locally by clicking the **Download** button.
-6. **Setting**: Edit project information such as the project name, profile, visibility level and permissions.
-7. **Delete**: Delete the project and all included files.
-
-#### Add New Project
-
-Clicking the **Add New Project** button on the project page will display the guide page for creating the project, and you can create a new project in just three steps.
-
-**Step 1**: Fill in the project name and project description in the **Base Information** step.
-
-![image-20191007171638338](/img/workspace/workspace-project-step1.png)
-
-+ **Visibility**: Set the visibility level of the project externally
-
-  + **Private**: (Default) Set to private project; all the files included in the project are not publicly displayed, but the execution results of the project can be individually set to public in Notebook, so that others can view the project's visual report.
-  + **Team**: Set to team project, select the team name in the team selection box, and other members of the team can access the project according to the set permissions.
-  + **Public**: Set to public project, all users in Workbench can view this project through search.
-+ **Permission**: Set the external access rights of the project. The permission setting interface will appear only when the **Visibility** of the project is set to **Team** or **Public**.
-
-  + **Can View**
-
-    When the project's **Visibility** is set to **Team**, other members of the team can **only view** the files for this project.
-
-    When the project's **Visibility** is set to **Public**, other members of the Workbench can **only view** the files for this project.
-
-  + **Can Edit**
-
-    When the project's **Visibility** is set to **Team**, other members of the team can **view** and **edit** the files for this project.
-
-    When the project's **Visibility** is set to **Public**, other members of the Workbench can **view** and **edit** the files for this project.
-
-  + **Can Execute**
-
-    When the project's **Visibility** is set to **Team**, other members of the team can **view**, **edit**, and **execute** the project's files.
-
-    When the project's **Visibility** is set to **Public**, other members of the Workbench can **view**, **edit**, and **execute** the project's files.
-
-**Step 2**: In the Initial Project step, Workbench provides four ways to initialize the project.
-
-+ **Template**: Workbench has built-in project templates for several different development languages and algorithm frameworks. You can choose any template to initialize your project and execute it directly in Notebook without any modification, which makes it especially suitable for novices to get started quickly.
-
-  ![image-20191007184749193](/img/workspace/workspace-project-step2-template.png)
-
-+ **Blank**: Create a blank project; later we can manually add the project's files in Notebook.
-
-  ![image-20191007184811389](/img/workspace/workspace-project-step2-blank.png)
-
-+ **Upload**: Initialize your project by uploading a file in notebook format that is compatible with the **Jupyter Notebook** and **Zeppelin Notebook** file formats.
-
-  ![image-20191007184825531](/img/workspace/workspace-project-step2-upload.png)
-
-+ **Git Repo**: Fork the contents of a repository in your **Github**/**Gitlab** account to initialize the project.
-
-  ![image-20191007184840989](/img/workspace/workspace-project-step2-git.png)
-
-**Step 3**: Preview the files included in the project
-
-![image-20191007191205660](/img/workspace/workspace-project-step3.png)
-
-+ **Save**: Save the project to Workspace.
-+ **Open In Notebook**: Save the project to Workspace and open the project with Notebook.
-
-### Release
-
-[TODO]
-
-### Training
-
-[TODO]
-
-### Team
-
-[TODO]
-
-### Shared
-
-[TODO]
-
-## Interpreters
-
-[TODO]
-
-## Job
-
-[TODO]
-
-## Data
-
-[TODO]
-
-## Model
-
-[TODO]
-
-## Manager
-
-### User
-
-[TODO]
-
-### Team
-
-[TODO]
-
-### Data Dict
-
-[TODO]
-
-### Department
-
-[TODO]
-
-## How to run workbench
-
-[How To Run Submarine Workbench Guide](./HowToRun.md)
diff --git a/website/docs/adminDocs/yarn/workbench/README.zh-CN.md b/website/docs/adminDocs/yarn/workbench/README.zh-CN.md
deleted file mode 100644
index 8aaf010..0000000
--- a/website/docs/adminDocs/yarn/workbench/README.zh-CN.md
+++ /dev/null
@@ -1,176 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-   http://www.apache.org/licenses/LICENSE-2.0
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-[English](./README.md) | 简体中文
-
-# Submarine Workbench Introduction
-
-`Submarine Workbench` 是为数据科学家设计的 WEB 系统。数据科学家可以通过 `Submarine Workbench` 进行交互式的访问 `Notebook`,提交/管理 Job,管理模型,创建模型训练工作流,访问数据集等。
-
-## Register
-
-每个需要使用 Submarine 进行机器学习算法开发的用户,都可以登录 `Submarine Workbench` 的 WEB 首页,在首页上,点击注册链接,填写用户名、注册邮箱和密码就可以完成注册,但此时用户状态为 `等待审核` 状态。
-
-管理员在  `Submarine Workbench` 中接收到用户的注册请求后,设置用户的操作权限,所属机构部门和分配资源,设置用户状态为 `审核通过` 后,用户才可以登录 Submarine Workbench。
-
-## Login
-
-每个 Submarine 的用户在 `Login` 页面中输入用户名和密码,登录到  `Submarine Workbench` 的首页 `Home`。
-
-## Home
-
-在 `Submarine Workbench` 的 `Home` 首页中,顶层通过四个图表显示了用户的资源的使用情况和任务执行的情况。
-
-在 `Quick Start` 列表中,显示了 Workbench 中最常使用的功能链接,方便用户可以快速的进行工作。
-
-在 `Open Recent` 列表中,显示了用户最近使用过的九个项目,方便你快速的进行工作。
-
-在 `What‘s New?` 列表中,显示了 Submarine 最新发布的一些功能特性和项目信息,方便你了解 Submarine 项目的最新进展。
-
-## Workspace
-
-Workspace 主要有五个 Tab 页组成,每个 Tab 页的标题中显示了各自项目的总数。
-
-### Project
-
-在 Project 页面中,以卡片的方式显示了用户自己创建的所有 Project。
-
-![image-20191007161424534](/img/workspace/workspace-project.png)
-
-每个 Project 卡片由以下部分内容组成:
-
-1. **Project 类型**:目前 Submarine 支持 `Notebook`、`Python`、`R`、`Scala`、`Tensorflow` 和 `PyTorch` 这六种类型的机器学习算法框架和开发语言,在项目卡片中以对应的图标进行标识。
-2. **Project Tags**:用户可以为每个 Project 打上不同的 `Tag` 标签,方便查找和管理。
-3. **Github/Gitlab 集成**:Submarine Workbench 与 `Github`/`Gitlab` 进行了系统集成,每个 Project 都可以在 Workbench 中进行 `Watch`、`Star`、`Fork` 和 `Comment` 操作。
-   + **Watch**:[TODO]
-   + **Star**:[TODO]
-   + **Fork**:[TODO]
-   + **Comment**:用户可以在项目中进行评论
-4. **Edit**:用户通过双击项目或者点击 `Edit` 按钮,可以在 `Notebook` 中打开项目,进行算法开发等操作。
-5. **Download**:用户通过点击 `Download` 按钮,将项目打包下载到本地。
-6. **Setting**:编辑项目信息,例如项目的名字,简介,分享级别和权限。
-7. **Delete**:删除项目中所有包含的文件。
-
-#### Add New Project
-
-在项目页面中点击 `Add New Project` 按钮,将会显示出创建项目的引导页面,只需要三个步骤就可以创建一个新的项目。
-
-第一步:在 **Base Information** 步骤中填写项目名称、项目简介。
-
-![image-20191007171638338](/img/workspace/workspace-project-step1.png)
-
-+ **Visibility**: 设置项目对外的可见级别
-
-  + **Private**: (默认)设置为私有项目,不对外公开项目中包含的所有文件,但是可以在 **Notebook** 中将项目的执行结果单独设置公开,方便其他人查看项目的可视化报告。
-  + **Team**: 设置为团队项目,在团队选择框中选择团队的名称,团队的其他成员可以根据设置的权限访问这个项目。
-  + **Public**: 设置为公开项目,**Workbench** 中的所有用户都可以通过搜索查看到这个项目。
-+ **Permission**: 设置项目对外的访问权限,只有将项目的 **Visibility** 设置为 **Team** 或 **Public** 的时候,才会出现权限设置界面。
-
-  + **Can View**
-
-    当项目的 **Visibility** 设置为 **Team** 时,团队中其他成员都只能**查看**这个项目的文件。
-
-    当项目的 Visibility 设置为 **Public** 时,**Workbench** 中其他成员都只能**查看**这个项目的文件。
-
-  + **Can Edit**
-
-    当项目的 **Visibility** 设置为 **Team** 时,团队中其他成员都可以**查看**、**编辑**这个项目的文件。
-
-    当项目的 **Visibility** 设置为 **Public** 时,**Workbench** 中其他成员都可以**查看**、**编辑**这个项目的文件。
-
-  + **Can Execute**
-
-    当项目的 **Visibility** 设置为 **Team** 时,团队中其他成员都可以**查看**、**编辑**、**执行**这个项目的文件。
-
-    当项目的 **Visibility** 设置为 **Public** 时,**Workbench** 中其他成员都可以**查看**、**编辑**、**执行**这个项目的文件。
-
-第二步:在 **Initial Project** 步骤中,**Workbench** 提供了四种项目初始化的方式
-
-+ **Template**: **Workbench** 内置了几种不同开发语言和算法框架的项目模版,你可以选择任何一种模版初始化你的项目,无需做任何修改就可以直接在 **Notebook** 中执行,特别适合新手进行快速的体验。
-
-  ![image-20191007184749193](/img/workspace/workspace-project-step2-template.png)
-
-+ **Blank**:创建一个空白的项目,稍后,我们可以通过在 **Notebook** 中手工添加项目的文件
-
-  ![image-20191007184811389](/img/workspace/workspace-project-step2-blank.png)
-
-+ **Upload**: 通过上传 **notebook** 格式的文件来初始化你的项目,**notebook** 格式兼容 **Jupyter Notebook** 和 **Zeppelin Notebook** 文件格式。
-
-  ![image-20191007184825531](/img/workspace/workspace-project-step2-upload.png)
-
-+ **Git Repo**: 在你的 **Github**/**Gitlab** 账号中 **Fork** 一个仓库中的文件内容来初始化项目。
-
-  ![image-20191007184840989](/img/workspace/workspace-project-step2-git.png)
-
-第三步:预览项目中的所包含的文件
-
-![image-20191007191205660](/img/workspace/workspace-project-step3.png)
-
-+ **Save**: 将项目保存到 Workspace 中。
-+ **Open In Notebook**: 将项目保存到 **Workspace** 中,并用 **Notebook** 打开项目。
-
-### Release
-
-[TODO]
-
-### Training
-
-[TODO]
-
-### Team
-
-[TODO]
-
-### Shared
-
-[TODO]
-
-## Interpreters
-
-[TODO]
-
-## Job
-
-[TODO]
-
-## Data
-
-[TODO]
-
-## Model
-
-[TODO]
-
-## Manager
-
-### User
-
-[TODO]
-
-### Team
-
-[TODO]
-
-### Data Dict
-
-[TODO]
-
-### Department
-
-[TODO]
-
-## How to run workbench
-
-[How To Run Submarine Workbench Guide](./HowToRun.md)
diff --git a/website/docs/adminDocs/yarn/workbench/notebook/jupyter.yaml b/website/docs/adminDocs/yarn/workbench/notebook/jupyter.yaml
deleted file mode 100644
index 32d065e..0000000
--- a/website/docs/adminDocs/yarn/workbench/notebook/jupyter.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-kind: Deployment
-apiVersion: extensions/v1beta1
-metadata:
-  name: jupyter-deployment
-  labels:
-    app: jupyter
-spec:
-  selector:
-    matchLabels:
-      app: jupyter
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        namespace: submarine
-        app: jupyter
-    spec:
-      nodeSelector:
-        kubernetes.io/hostname: k8s-submarine-control-plane
-      containers:
-        - name: jupyter
-          image: jupyter/minimal-notebook
-          # mount path in jupyter container
-          volumeMounts:
-            - mountPath: /home/jovyan
-              name: jupyter-storage
-      volumes:
-        - name: jupyter-storage
-          # this path must be the same as extraMounts.containerPath in the kind config
-          hostPath:
-            path: /tmp/submarine/
-            type: DirectoryOrCreate
----
-kind: Service
-apiVersion: v1
-metadata:
-  name: jupyter-service
-spec:
-  selector:
-    app: jupyter
-  type: NodePort
-  ports:
-    - port: 8888
-      targetPort: 8888
-      nodePort: 30070
diff --git a/website/docs/adminDocs/yarn/workbench/notebook/setup-jupyter.md b/website/docs/adminDocs/yarn/workbench/notebook/setup-jupyter.md
deleted file mode 100644
index e15ffdc..0000000
--- a/website/docs/adminDocs/yarn/workbench/notebook/setup-jupyter.md
+++ /dev/null
@@ -1,73 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-   http://www.apache.org/licenses/LICENSE-2.0
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-# Deploy Jupyter Notebook on Kubernetes
-This guide covers deploying Jupyter Notebook on a Kubernetes cluster.
-
-## Experiment environment
-### Setup Kubernetes
-We recommend using [kind](https://kind.sigs.k8s.io/) to set up a Kubernetes cluster on a local machine.
-
-You can use extra mounts to mount a host path into the kind node and use extra port mappings to
-port-forward to the kind nodes. Please refer to [kind configuration](https://kind.sigs.k8s.io/docs/user/configuration/#extra-mounts)
-for more details.
-
-You need to create a kind config file. The following is an example:
-```
-kind: Cluster
-apiVersion: kind.x-k8s.io/v1alpha4
-nodes:
-- role: control-plane
-  extraMounts:
-  # add a mount from /path/to/my/files on the host to /files on the node
-  - hostPath: /tmp/submarine
-    containerPath: /tmp/submarine
-  extraPortMappings:
-  - containerPort: 80
-    hostPort: 80
-    protocol: TCP
-  # exposing additional ports to be used for NodePort services
-  - containerPort: 30070
-    hostPort: 8888
-    protocol: TCP
-```
-
-Run the following commands:
-
-```
-kind create cluster --image kindest/node:v1.15.6 --config <path-to-kind-config> --name k8s-submarine
-kubectl create namespace submarine
-```
-
-### Deploy Jupyter Notebook
-Once you have a running Kubernetes cluster, you can write a YAML file to deploy a jupyter notebook.
-In this [example yaml](./jupyter.yaml), we use [jupyter/minimal-notebook](https://hub.docker.com/r/jupyter/minimal-notebook/)
-to make a single notebook running on the kind node.
-
-```
-kubectl apply -f jupyter.yaml --namespace submarine
-```
-
-Once the Jupyter notebook is running, you can access the notebook server from the browser at http://localhost:8888 on your local machine.
-
-You can enter and store a password for your notebook server with:
-```
-kubectl exec -it <jupyter-pod-name> -- jupyter notebook password
-```
-After restarting the notebook server, you can log in to the Jupyter notebook with your new password.
-
-If you want to use JupyterLab:
-```
-http://localhost:8888/lab
-```
diff --git a/website/docs/ecosystem/kaldi/RunningDistributedThchs30KaldiJobs.md b/website/docs/ecosystem/kaldi/RunningDistributedThchs30KaldiJobs.md
deleted file mode 100644
index a0ca769..0000000
--- a/website/docs/ecosystem/kaldi/RunningDistributedThchs30KaldiJobs.md
+++ /dev/null
@@ -1,678 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-   http://www.apache.org/licenses/LICENSE-2.0
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-# Thchs30 Kaldi Example With YARN Service
-
-## Prepare data for training
-
-Thchs30 is a common benchmark in machine learning for speech data and transcripts. The example below is based on the Thchs30 dataset.
-
-1) Download the gz files:
-```
-THCHS30_PATH=/data/hdfs1/nfs/aisearch/kaldi/thchs30
-mkdir $THCHS30_PATH/data && cd $THCHS30_PATH/data
-wget http://www.openslr.org/resources/18/data_thchs30.tgz
-wget http://www.openslr.org/resources/18/test-noise.tgz
-wget http://www.openslr.org/resources/18/resource.tgz
-```
-
-2) Check out https://github.com/apache/submarine.git:
-```
-git clone https://github.com/apache/submarine.git
-```
-
-3) Go to `submarine/docker/ecosystem/`
-```
-cp -r ./kaldi/sge $THCHS30_PATH/sge
-```
-
-4) Optional: modify `/opt/kaldi/egs/thchs30/s5/cmd.sh` in the container. This queue is used by default:
-```
-export train_cmd="queue.pl -q all.q"
-```
-
-**Warning:**
-
-Please note that the YARN service doesn't allow multiple services with the same name, so please run the following command
-```
-yarn application -destroy <service-name>
-```
-to delete services if you want to reuse the same service name.
-
-## Prepare Docker images
-
-Refer to [Write Dockerfile](WriteDockerfileKaldi.md) to build a Docker image or use the prebuilt one:
-
-- hadoopsubmarine/kaldi-latest-gpu-base:0.0.1
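-
-For example, to pull the prebuilt image:
-
-```
-docker pull hadoopsubmarine/kaldi-latest-gpu-base:0.0.1
-```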
-
-## Run Kaldi jobs
-
-### Run distributed training
-
-```
-# Change the variables according to your needs
-SUBMARINE_VERSION=3.3.0-SNAPSHOT
-WORKER_NUM=2
-SGE_CFG_PATH=/cfg
-THCHS30_PATH=/data/hdfs1/nfs/aisearch/kaldi/thchs30
-DOCKER_HADOOP_HDFS_HOME=/app/${SUBMARINE_VERSION}
-
-# This depends on RegistryDNS; you must fill in <your RegistryDNS IP> in resolv.conf
-yarn jar /usr/local/matrix/share/hadoop/yarn/${SUBMARINE_VERSION}.jar \
-job run --name kaldi-thchs30-distributed \
---env DOCKER_JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/jre/ \
---env DOCKER_HADOOP_HDFS_HOME=$DOCKER_HADOOP_HDFS_HOME \
---env YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_NETWORK=calico-network \
---env PYTHONUNBUFFERED="0" \
---env TZ="Asia/Shanghai" \
---env YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS=${THCHS30_PATH}/sge/resolv.conf:/etc/resolv.conf,\
-${THCHS30_PATH}/sge/passwd:/etc/passwd:rw,\
-${THCHS30_PATH}/sge/group:/etc/group:rw,\
-${THCHS30_PATH}/sge:$SGE_CFG_PATH,\
-${THCHS30_PATH}/data:/opt/kaldi/egs/thchs30,\
-${THCHS30_PATH}/mul/s5:/opt/kaldi/egs/mul-thchs30/s5 \
---input_path /opt/kaldi/egs/thchs30/data \
---docker_image hadoopsubmarine/kaldi-latest-gpu-base:0.0.1 \
---num_workers $WORKER_NUM \
---worker_resources memory=64G,vcores=32,gpu=1 \
---worker_launch_cmd "sudo mkdir -p /opt/kaldi/egs/mul-thchs30/s5 && \
-sudo cp /opt/kaldi/egs/thchs30/s5/* /opt/kaldi/egs/mul-thchs30/s5 -r && \
-cluster_user=`whoami` domain_suffix="ml.com" && \
-cd /cfg && bash sge_run.sh $WORKER_NUM $SGE_CFG_PATH && \
-if [ $(echo $HOST_NAME | grep "^master-") ]; then sleep 2m && cd /opt/kaldi/egs/mul-thchs30/s5 && ./run.sh; fi" \
---verbose
-```
-
-Explanations:
-
-- A `num_workers` value greater than 1 indicates distributed training.
-- The parameters / resources / Docker image of the parameter server can be specified separately. In many cases the parameter server doesn't require a GPU. We don't need a parameter server here.
-
-For the meaning of the individual parameters, see the [QuickStart](../../adminDocs/yarn/README) page!
-
-*Outputs of distributed training*
-
-Sample output of master:
-```
-...
-Reading package lists...
-Building dependency tree...
-Reading state information...
-The following additional packages will be installed:
-  bsd-mailx cpio gridengine-common ifupdown iproute2 isc-dhcp-client
-  isc-dhcp-common libatm1 libdns-export162 libisc-export160 liblockfile-bin
-  liblockfile1 libmnl0 libxmuu1 libxtables11 ncurses-term netbase
-  openssh-client openssh-server openssh-sftp-server postfix python3-chardet
-  python3-pkg-resources python3-requests python3-six python3-urllib3
-  ssh-import-id ssl-cert tcsh xauth
-Suggested packages:
-  libarchive1 gridengine-qmon ppp rdnssd iproute2-doc resolvconf avahi-autoipd
-  isc-dhcp-client-ddns apparmor ssh-askpass libpam-ssh keychain monkeysphere
-  rssh molly-guard ufw procmail postfix-mysql postfix-pgsql postfix-ldap
-  postfix-pcre sasl2-bin libsasl2-modules dovecot-common postfix-cdb
-  postfix-doc python3-setuptools python3-ndg-httpsclient python3-openssl
-  python3-pyasn1 openssl-blacklist
-The following NEW packages will be installed:
-  bsd-mailx cpio gridengine-client gridengine-common gridengine-exec
-  gridengine-master ifupdown iproute2 isc-dhcp-client isc-dhcp-common libatm1
-  libdns-export162 libisc-export160 liblockfile-bin liblockfile1 libmnl0
-  libxmuu1 libxtables11 ncurses-term netbase openssh-client openssh-server
-  openssh-sftp-server postfix python3-chardet python3-pkg-resources
-  python3-requests python3-six python3-urllib3 ssh-import-id ssl-cert tcsh
-  xauth
-0 upgraded, 33 newly installed, 0 to remove and 30 not upgraded.
-Need to get 12.1 MB of archives.
-After this operation, 65.8 MB of additional disk space will be used.
-Get:1 http://archive.ubuntu.com/ubuntu xenial/main amd64 libatm1 amd64 1:2.5.1-1.5 [24.2 kB]
-Get:2 http://archive.ubuntu.com/ubuntu xenial/main amd64 libmnl0 amd64 1.0.3-5 [12.0 kB]
-Get:3 http://archive.ubuntu.com/ubuntu xenial/main amd64 liblockfile-bin amd64 1.09-6ubuntu1 [10.8 kB]
-Get:4 http://archive.ubuntu.com/ubuntu xenial/main amd64 liblockfile1 amd64 1.09-6ubuntu1 [8056 B]
-Get:5 http://archive.ubuntu.com/ubuntu xenial/main amd64 cpio amd64 2.11+dfsg-5ubuntu1 [74.8 kB]
-Get:6 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 iproute2 amd64 4.3.0-1ubuntu3.16.04.5 [523 kB]
-Get:7 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 ifupdown amd64 0.8.10ubuntu1.4 [54.9 kB]
-Get:8 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libisc-export160 amd64 1:9.10.3.dfsg.P4-8ubuntu1.15 [153 kB]
-Get:9 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libdns-export162 amd64 1:9.10.3.dfsg.P4-8ubuntu1.15 [665 kB]
-Get:10 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 isc-dhcp-client amd64 4.3.3-5ubuntu12.10 [224 kB]
-Get:11 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 isc-dhcp-common amd64 4.3.3-5ubuntu12.10 [105 kB]
-Get:12 http://archive.ubuntu.com/ubuntu xenial/main amd64 libxtables11 amd64 1.6.0-2ubuntu3 [27.2 kB]
-Get:13 http://archive.ubuntu.com/ubuntu xenial/main amd64 netbase all 5.3 [12.9 kB]
-Get:14 http://archive.ubuntu.com/ubuntu xenial/main amd64 libxmuu1 amd64 2:1.1.2-2 [9674 B]
-Get:15 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 openssh-client amd64 1:7.2p2-4ubuntu2.8 [590 kB]
-Get:16 http://archive.ubuntu.com/ubuntu xenial/main amd64 xauth amd64 1:1.0.9-1ubuntu2 [22.7 kB]
-Get:17 http://archive.ubuntu.com/ubuntu xenial/main amd64 ssl-cert all 1.0.37 [16.9 kB]
-Get:18 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 postfix amd64 3.1.0-3ubuntu0.3 [1152 kB]
-Get:19 http://archive.ubuntu.com/ubuntu xenial/main amd64 bsd-mailx amd64 8.1.2-0.20160123cvs-2 [63.7 kB]
-Get:20 http://archive.ubuntu.com/ubuntu xenial/universe amd64 gridengine-common all 6.2u5-7.4 [156 kB]
-Get:21 http://archive.ubuntu.com/ubuntu xenial/universe amd64 gridengine-client amd64 6.2u5-7.4 [3394 kB]
-Get:22 http://archive.ubuntu.com/ubuntu xenial/universe amd64 tcsh amd64 6.18.01-5 [410 kB]
-Get:23 http://archive.ubuntu.com/ubuntu xenial/universe amd64 gridengine-exec amd64 6.2u5-7.4 [990 kB]
-Get:24 http://archive.ubuntu.com/ubuntu xenial/universe amd64 gridengine-master amd64 6.2u5-7.4 [2429 kB]
-Get:25 http://archive.ubuntu.com/ubuntu xenial/main amd64 ncurses-term all 6.0+20160213-1ubuntu1 [249 kB]
-Get:26 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 openssh-sftp-server amd64 1:7.2p2-4ubuntu2.8 [38.9 kB]
-Get:27 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 openssh-server amd64 1:7.2p2-4ubuntu2.8 [335 kB]
-Get:28 http://archive.ubuntu.com/ubuntu xenial/main amd64 python3-pkg-resources all 20.7.0-1 [79.0 kB]
-Get:29 http://archive.ubuntu.com/ubuntu xenial/main amd64 python3-chardet all 2.3.0-2 [96.2 kB]
-Get:30 http://archive.ubuntu.com/ubuntu xenial/main amd64 python3-six all 1.10.0-3 [11.0 kB]
-Get:31 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 python3-urllib3 all 1.13.1-2ubuntu0.16.04.3 [58.5 kB]
-Get:32 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 python3-requests all 2.9.1-3ubuntu0.1 [55.8 kB]
-Get:33 http://archive.ubuntu.com/ubuntu xenial/main amd64 ssh-import-id all 5.5-0ubuntu1 [10.2 kB]
-Fetched 12.1 MB in 0s (15.0 MB/s)
-Selecting previously unselected package libatm1:amd64.
-(Reading database ...
-(Reading database ... 5%
-(Reading database ... 10%
-(Reading database ... 15%
-(Reading database ... 20%
-(Reading database ... 25%
-(Reading database ... 30%
-(Reading database ... 35%
-(Reading database ... 40%
-(Reading database ... 45%
-(Reading database ... 50%
-(Reading database ... 55%
-(Reading database ... 60%
-(Reading database ... 65%
-(Reading database ... 70%
-(Reading database ... 75%
-(Reading database ... 80%
-(Reading database ... 85%
-(Reading database ... 90%
-(Reading database ... 95%
-(Reading database ... 100%
-(Reading database ... 21398 files and directories currently installed.)
-Preparing to unpack .../libatm1_1%3a2.5.1-1.5_amd64.deb ...
-Unpacking libatm1:amd64 (1:2.5.1-1.5) ...
-Selecting previously unselected package libmnl0:amd64.
-Preparing to unpack .../libmnl0_1.0.3-5_amd64.deb ...
-Unpacking libmnl0:amd64 (1.0.3-5) ...
-Selecting previously unselected package liblockfile-bin.
-Preparing to unpack .../liblockfile-bin_1.09-6ubuntu1_amd64.deb ...
-Unpacking liblockfile-bin (1.09-6ubuntu1) ...
-Selecting previously unselected package liblockfile1:amd64.
-Preparing to unpack .../liblockfile1_1.09-6ubuntu1_amd64.deb ...
-Unpacking liblockfile1:amd64 (1.09-6ubuntu1) ...
-Selecting previously unselected package cpio.
-Preparing to unpack .../cpio_2.11+dfsg-5ubuntu1_amd64.deb ...
-Unpacking cpio (2.11+dfsg-5ubuntu1) ...
-Selecting previously unselected package iproute2.
-Preparing to unpack .../iproute2_4.3.0-1ubuntu3.16.04.5_amd64.deb ...
-Unpacking iproute2 (4.3.0-1ubuntu3.16.04.5) ...
-Selecting previously unselected package ifupdown.
-Preparing to unpack .../ifupdown_0.8.10ubuntu1.4_amd64.deb ...
-Unpacking ifupdown (0.8.10ubuntu1.4) ...
-Selecting previously unselected package libisc-export160.
-Preparing to unpack .../libisc-export160_1%3a9.10.3.dfsg.P4-8ubuntu1.15_amd64.deb ...
-Unpacking libisc-export160 (1:9.10.3.dfsg.P4-8ubuntu1.15) ...
-Selecting previously unselected package libdns-export162.
-Preparing to unpack .../libdns-export162_1%3a9.10.3.dfsg.P4-8ubuntu1.15_amd64.deb ...
-Unpacking libdns-export162 (1:9.10.3.dfsg.P4-8ubuntu1.15) ...
-Selecting previously unselected package isc-dhcp-client.
-Preparing to unpack .../isc-dhcp-client_4.3.3-5ubuntu12.10_amd64.deb ...
-Unpacking isc-dhcp-client (4.3.3-5ubuntu12.10) ...
-Selecting previously unselected package isc-dhcp-common.
-Preparing to unpack .../isc-dhcp-common_4.3.3-5ubuntu12.10_amd64.deb ...
-Unpacking isc-dhcp-common (4.3.3-5ubuntu12.10) ...
-Selecting previously unselected package libxtables11:amd64.
-Preparing to unpack .../libxtables11_1.6.0-2ubuntu3_amd64.deb ...
-Unpacking libxtables11:amd64 (1.6.0-2ubuntu3) ...
-Selecting previously unselected package netbase.
-Preparing to unpack .../archives/netbase_5.3_all.deb ...
-Unpacking netbase (5.3) ...
-Selecting previously unselected package libxmuu1:amd64.
-Preparing to unpack .../libxmuu1_2%3a1.1.2-2_amd64.deb ...
-Unpacking libxmuu1:amd64 (2:1.1.2-2) ...
-Selecting previously unselected package openssh-client.
-Preparing to unpack .../openssh-client_1%3a7.2p2-4ubuntu2.8_amd64.deb ...
-Unpacking openssh-client (1:7.2p2-4ubuntu2.8) ...
-Selecting previously unselected package xauth.
-Preparing to unpack .../xauth_1%3a1.0.9-1ubuntu2_amd64.deb ...
-Unpacking xauth (1:1.0.9-1ubuntu2) ...
-Selecting previously unselected package ssl-cert.
-Preparing to unpack .../ssl-cert_1.0.37_all.deb ...
-Unpacking ssl-cert (1.0.37) ...
-Selecting previously unselected package postfix.
-Preparing to unpack .../postfix_3.1.0-3ubuntu0.3_amd64.deb ...
-Unpacking postfix (3.1.0-3ubuntu0.3) ...
-Selecting previously unselected package bsd-mailx.
-Preparing to unpack .../bsd-mailx_8.1.2-0.20160123cvs-2_amd64.deb ...
-Unpacking bsd-mailx (8.1.2-0.20160123cvs-2) ...
-Selecting previously unselected package gridengine-common.
-Preparing to unpack .../gridengine-common_6.2u5-7.4_all.deb ...
-Unpacking gridengine-common (6.2u5-7.4) ...
-Selecting previously unselected package gridengine-client.
-Preparing to unpack .../gridengine-client_6.2u5-7.4_amd64.deb ...
-Unpacking gridengine-client (6.2u5-7.4) ...
-Selecting previously unselected package tcsh.
-Preparing to unpack .../tcsh_6.18.01-5_amd64.deb ...
-Unpacking tcsh (6.18.01-5) ...
-Selecting previously unselected package gridengine-exec.
-Preparing to unpack .../gridengine-exec_6.2u5-7.4_amd64.deb ...
-Unpacking gridengine-exec (6.2u5-7.4) ...
-Selecting previously unselected package gridengine-master.
-Preparing to unpack .../gridengine-master_6.2u5-7.4_amd64.deb ...
-Unpacking gridengine-master (6.2u5-7.4) ...
-Selecting previously unselected package ncurses-term.
-Preparing to unpack .../ncurses-term_6.0+20160213-1ubuntu1_all.deb ...
-Unpacking ncurses-term (6.0+20160213-1ubuntu1) ...
-Selecting previously unselected package openssh-sftp-server.
-Preparing to unpack .../openssh-sftp-server_1%3a7.2p2-4ubuntu2.8_amd64.deb ...
-Unpacking openssh-sftp-server (1:7.2p2-4ubuntu2.8) ...
-Selecting previously unselected package openssh-server.
-Preparing to unpack .../openssh-server_1%3a7.2p2-4ubuntu2.8_amd64.deb ...
-Unpacking openssh-server (1:7.2p2-4ubuntu2.8) ...
-Selecting previously unselected package python3-pkg-resources.
-Preparing to unpack .../python3-pkg-resources_20.7.0-1_all.deb ...
-Unpacking python3-pkg-resources (20.7.0-1) ...
-Selecting previously unselected package python3-chardet.
-Preparing to unpack .../python3-chardet_2.3.0-2_all.deb ...
-Unpacking python3-chardet (2.3.0-2) ...
-Selecting previously unselected package python3-six.
-Preparing to unpack .../python3-six_1.10.0-3_all.deb ...
-Unpacking python3-six (1.10.0-3) ...
-Selecting previously unselected package python3-urllib3.
-Preparing to unpack .../python3-urllib3_1.13.1-2ubuntu0.16.04.3_all.deb ...
-Unpacking python3-urllib3 (1.13.1-2ubuntu0.16.04.3) ...
-Selecting previously unselected package python3-requests.
-Preparing to unpack .../python3-requests_2.9.1-3ubuntu0.1_all.deb ...
-Unpacking python3-requests (2.9.1-3ubuntu0.1) ...
-Selecting previously unselected package ssh-import-id.
-Preparing to unpack .../ssh-import-id_5.5-0ubuntu1_all.deb ...
-Unpacking ssh-import-id (5.5-0ubuntu1) ...
-Processing triggers for systemd (229-4ubuntu21.22) ...
-Processing triggers for libc-bin (2.23-0ubuntu11) ...
-Setting up libatm1:amd64 (1:2.5.1-1.5) ...
-Setting up libmnl0:amd64 (1.0.3-5) ...
-Setting up liblockfile-bin (1.09-6ubuntu1) ...
-Setting up liblockfile1:amd64 (1.09-6ubuntu1) ...
-Setting up cpio (2.11+dfsg-5ubuntu1) ...
-update-alternatives: using /bin/mt-gnu to provide /bin/mt (mt) in auto mode
-Setting up iproute2 (4.3.0-1ubuntu3.16.04.5) ...
-Setting up ifupdown (0.8.10ubuntu1.4) ...
-Creating /etc/network/interfaces.
-Setting up libisc-export160 (1:9.10.3.dfsg.P4-8ubuntu1.15) ...
-Setting up libdns-export162 (1:9.10.3.dfsg.P4-8ubuntu1.15) ...
-Setting up isc-dhcp-client (4.3.3-5ubuntu12.10) ...
-Setting up isc-dhcp-common (4.3.3-5ubuntu12.10) ...
-Setting up libxtables11:amd64 (1.6.0-2ubuntu3) ...
-Setting up netbase (5.3) ...
-Setting up libxmuu1:amd64 (2:1.1.2-2) ...
-Setting up openssh-client (1:7.2p2-4ubuntu2.8) ...
-Setting up xauth (1:1.0.9-1ubuntu2) ...
-Setting up ssl-cert (1.0.37) ...
-Setting up postfix (3.1.0-3ubuntu0.3) ...
-Creating /etc/postfix/dynamicmaps.cf
-setting myhostname: master-0.XXX
-setting alias maps
-setting alias database
-changing /etc/mailname to master-0.XXX
-setting myorigin
-setting destinations: $myhostname, master-0.XXX, localhost.XXX, , localhost
-setting relayhost:
-setting mynetworks: 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128
-setting mailbox_size_limit: 0
-setting recipient_delimiter: +
-setting inet_interfaces: all
-setting inet_protocols: all
-/etc/aliases does not exist, creating it.
-WARNING: /etc/aliases exists, but does not have a root alias.
-
-Postfix is now set up with a default configuration.  If you need to make
-changes, edit
-/etc/postfix/main.cf (and others) as needed.  To view Postfix configuration
-values, see postconf(1).
-
-After modifying main.cf, be sure to run '/etc/init.d/postfix reload'.
-
-Running newaliases
-invoke-rc.d: could not determine current runlevel
-invoke-rc.d: policy-rc.d denied execution of restart.
-Setting up bsd-mailx (8.1.2-0.20160123cvs-2) ...
-update-alternatives: using /usr/bin/bsd-mailx to provide /usr/bin/mailx (mailx) in auto mode
-Setting up gridengine-common (6.2u5-7.4) ...
-
-Creating config file /etc/default/gridengine with new version
-Setting up gridengine-client (6.2u5-7.4) ...
-Setting up tcsh (6.18.01-5) ...
-update-alternatives: using /bin/tcsh to provide /bin/csh (csh) in auto mode
-Setting up gridengine-exec (6.2u5-7.4) ...
-invoke-rc.d: could not determine current runlevel
-invoke-rc.d: policy-rc.d denied execution of start.
-Setting up gridengine-master (6.2u5-7.4) ...
-su: Authentication failure
-(Ignored)
-Initializing cluster with the following parameters:
- => SGE_ROOT: /var/lib/gridengine
- => SGE_CELL: default
- => Spool directory: /var/spool/gridengine/spooldb
- => Initial manager user: sgeadmin
-Initializing spool (/var/spool/gridengine/spooldb)
-Initializing global configuration based on /usr/share/gridengine/default-configuration
-Initializing complexes based on /usr/share/gridengine/centry
-Initializing usersets based on /usr/share/gridengine/usersets
-Adding user sgeadmin as a manager
-Cluster creation complete
-invoke-rc.d: could not determine current runlevel
-invoke-rc.d: policy-rc.d denied execution of start.
-Setting up ncurses-term (6.0+20160213-1ubuntu1) ...
-Setting up openssh-sftp-server (1:7.2p2-4ubuntu2.8) ...
-Setting up openssh-server (1:7.2p2-4ubuntu2.8) ...
-Creating SSH2 RSA key; this may take some time ...
-2048 SHA256:hfQpES1aS4cjF8AOCIParZR6342vdwutoyITru0wtuE root@master-0.XXX (RSA)
-Creating SSH2 DSA key; this may take some time ...
-1024 SHA256:gOsPMVgwXBHJzixN/gtJAG+hVCHqw8t7Fhy4nsx8od0 root@master-0.XXX (DSA)
-Creating SSH2 ECDSA key; this may take some time ...
-256 SHA256:3D5SNniUb4z+/BuqXheFgG+DfjsxXqTT/zwWAqdX4jM root@master-0.XXX (ECDSA)
-Creating SSH2 ED25519 key; this may take some time ...
-256 SHA256:SwyeV9iSqOW4TKLi4Wvc0zD8lWtupHCJpDu8oWBwbfU root@master-0.XXX (ED25519)
-invoke-rc.d: could not determine current runlevel
-invoke-rc.d: policy-rc.d denied execution of start.
-Setting up python3-pkg-resources (20.7.0-1) ...
-Setting up python3-chardet (2.3.0-2) ...
-Setting up python3-six (1.10.0-3) ...
-Setting up python3-urllib3 (1.13.1-2ubuntu0.16.04.3) ...
-Setting up python3-requests (2.9.1-3ubuntu0.1) ...
-Setting up ssh-import-id (5.5-0ubuntu1) ...
-Processing triggers for libc-bin (2.23-0ubuntu11) ...
-Processing triggers for systemd (229-4ubuntu21.22) ...
-Reading package lists...
-Building dependency tree...
-Reading state information...
-0 upgraded, 0 newly installed, 0 to remove and 30 not upgraded.
-```
-
-cat $SGE_CFG_PATH/setcfg.log
-```
-finish master
-add worker node worker-0.XXX
-```
-
-Sample output of worker:
-```
-please wait
-Reading package lists...
-Building dependency tree...
-Reading state information...
-The following additional packages will be installed:
-  bsd-mailx cpio gridengine-common ifupdown iproute2 isc-dhcp-client
-  isc-dhcp-common libatm1 libdns-export162 libisc-export160 liblockfile-bin
-  liblockfile1 libmnl0 libxmuu1 libxtables11 ncurses-term netbase
-  openssh-client openssh-server openssh-sftp-server postfix python3-chardet
-  python3-pkg-resources python3-requests python3-six python3-urllib3
-  ssh-import-id ssl-cert tcsh xauth
-Suggested packages:
-  libarchive1 gridengine-qmon ppp rdnssd iproute2-doc resolvconf avahi-autoipd
-  isc-dhcp-client-ddns apparmor ssh-askpass libpam-ssh keychain monkeysphere
-  rssh molly-guard ufw procmail postfix-mysql postfix-pgsql postfix-ldap
-  postfix-pcre sasl2-bin libsasl2-modules dovecot-common postfix-cdb
-  postfix-doc python3-setuptools python3-ndg-httpsclient python3-openssl
-  python3-pyasn1 openssl-blacklist
-The following NEW packages will be installed:
-  bsd-mailx cpio gridengine-client gridengine-common gridengine-exec ifupdown
-  iproute2 isc-dhcp-client isc-dhcp-common libatm1 libdns-export162
-  libisc-export160 liblockfile-bin liblockfile1 libmnl0 libxmuu1 libxtables11
-  ncurses-term netbase openssh-client openssh-server openssh-sftp-server
-  postfix python3-chardet python3-pkg-resources python3-requests python3-six
-  python3-urllib3 ssh-import-id ssl-cert tcsh xauth
-0 upgraded, 32 newly installed, 0 to remove and 30 not upgraded.
-Need to get 9633 kB of archives.
-After this operation, 51.2 MB of additional disk space will be used.
-Get:1 http://archive.ubuntu.com/ubuntu xenial/main amd64 libatm1 amd64 1:2.5.1-1.5 [24.2 kB]
-Get:2 http://archive.ubuntu.com/ubuntu xenial/main amd64 libmnl0 amd64 1.0.3-5 [12.0 kB]
-Get:3 http://archive.ubuntu.com/ubuntu xenial/main amd64 liblockfile-bin amd64 1.09-6ubuntu1 [10.8 kB]
-Get:4 http://archive.ubuntu.com/ubuntu xenial/main amd64 liblockfile1 amd64 1.09-6ubuntu1 [8056 B]
-Get:5 http://archive.ubuntu.com/ubuntu xenial/main amd64 cpio amd64 2.11+dfsg-5ubuntu1 [74.8 kB]
-Get:6 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 iproute2 amd64 4.3.0-1ubuntu3.16.04.5 [523 kB]
-Get:7 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 ifupdown amd64 0.8.10ubuntu1.4 [54.9 kB]
-Get:8 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libisc-export160 amd64 1:9.10.3.dfsg.P4-8ubuntu1.15 [153 kB]
-Get:9 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 libdns-export162 amd64 1:9.10.3.dfsg.P4-8ubuntu1.15 [665 kB]
-Get:10 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 isc-dhcp-client amd64 4.3.3-5ubuntu12.10 [224 kB]
-Get:11 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 isc-dhcp-common amd64 4.3.3-5ubuntu12.10 [105 kB]
-Get:12 http://archive.ubuntu.com/ubuntu xenial/main amd64 libxtables11 amd64 1.6.0-2ubuntu3 [27.2 kB]
-Get:13 http://archive.ubuntu.com/ubuntu xenial/main amd64 netbase all 5.3 [12.9 kB]
-Get:14 http://archive.ubuntu.com/ubuntu xenial/main amd64 libxmuu1 amd64 2:1.1.2-2 [9674 B]
-Get:15 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 openssh-client amd64 1:7.2p2-4ubuntu2.8 [590 kB]
-Get:16 http://archive.ubuntu.com/ubuntu xenial/main amd64 xauth amd64 1:1.0.9-1ubuntu2 [22.7 kB]
-Get:17 http://archive.ubuntu.com/ubuntu xenial/main amd64 ssl-cert all 1.0.37 [16.9 kB]
-Get:18 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 postfix amd64 3.1.0-3ubuntu0.3 [1152 kB]
-Get:19 http://archive.ubuntu.com/ubuntu xenial/main amd64 bsd-mailx amd64 8.1.2-0.20160123cvs-2 [63.7 kB]
-Get:20 http://archive.ubuntu.com/ubuntu xenial/universe amd64 gridengine-common all 6.2u5-7.4 [156 kB]
-Get:21 http://archive.ubuntu.com/ubuntu xenial/universe amd64 gridengine-client amd64 6.2u5-7.4 [3394 kB]
-Get:22 http://archive.ubuntu.com/ubuntu xenial/universe amd64 tcsh amd64 6.18.01-5 [410 kB]
-Get:23 http://archive.ubuntu.com/ubuntu xenial/universe amd64 gridengine-exec amd64 6.2u5-7.4 [990 kB]
-Get:24 http://archive.ubuntu.com/ubuntu xenial/main amd64 ncurses-term all 6.0+20160213-1ubuntu1 [249 kB]
-Get:25 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 openssh-sftp-server amd64 1:7.2p2-4ubuntu2.8 [38.9 kB]
-Get:26 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 openssh-server amd64 1:7.2p2-4ubuntu2.8 [335 kB]
-Get:27 http://archive.ubuntu.com/ubuntu xenial/main amd64 python3-pkg-resources all 20.7.0-1 [79.0 kB]
-Get:28 http://archive.ubuntu.com/ubuntu xenial/main amd64 python3-chardet all 2.3.0-2 [96.2 kB]
-Get:29 http://archive.ubuntu.com/ubuntu xenial/main amd64 python3-six all 1.10.0-3 [11.0 kB]
-Get:30 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 python3-urllib3 all 1.13.1-2ubuntu0.16.04.3 [58.5 kB]
-Get:31 http://archive.ubuntu.com/ubuntu xenial-updates/main amd64 python3-requests all 2.9.1-3ubuntu0.1 [55.8 kB]
-Get:32 http://archive.ubuntu.com/ubuntu xenial/main amd64 ssh-import-id all 5.5-0ubuntu1 [10.2 kB]
-Fetched 9633 kB in 2s (4496 kB/s)
-Selecting previously unselected package libatm1:amd64.
-(Reading database ...
-(Reading database ... 5%
-(Reading database ... 10%
-(Reading database ... 15%
-(Reading database ... 20%
-(Reading database ... 25%
-(Reading database ... 30%
-(Reading database ... 35%
-(Reading database ... 40%
-(Reading database ... 45%
-(Reading database ... 50%
-(Reading database ... 55%
-(Reading database ... 60%
-(Reading database ... 65%
-(Reading database ... 70%
-(Reading database ... 75%
-(Reading database ... 80%
-(Reading database ... 85%
-(Reading database ... 90%
-(Reading database ... 95%
-(Reading database ... 100%
-(Reading database ... 21398 files and directories currently installed.)
-Preparing to unpack .../libatm1_1%3a2.5.1-1.5_amd64.deb ...
-Unpacking libatm1:amd64 (1:2.5.1-1.5) ...
-Selecting previously unselected package libmnl0:amd64.
-Preparing to unpack .../libmnl0_1.0.3-5_amd64.deb ...
-Unpacking libmnl0:amd64 (1.0.3-5) ...
-Selecting previously unselected package liblockfile-bin.
-Preparing to unpack .../liblockfile-bin_1.09-6ubuntu1_amd64.deb ...
-Unpacking liblockfile-bin (1.09-6ubuntu1) ...
-Selecting previously unselected package liblockfile1:amd64.
-Preparing to unpack .../liblockfile1_1.09-6ubuntu1_amd64.deb ...
-Unpacking liblockfile1:amd64 (1.09-6ubuntu1) ...
-Selecting previously unselected package cpio.
-Preparing to unpack .../cpio_2.11+dfsg-5ubuntu1_amd64.deb ...
-Unpacking cpio (2.11+dfsg-5ubuntu1) ...
-Selecting previously unselected package iproute2.
-Preparing to unpack .../iproute2_4.3.0-1ubuntu3.16.04.5_amd64.deb ...
-Unpacking iproute2 (4.3.0-1ubuntu3.16.04.5) ...
-Selecting previously unselected package ifupdown.
-Preparing to unpack .../ifupdown_0.8.10ubuntu1.4_amd64.deb ...
-Unpacking ifupdown (0.8.10ubuntu1.4) ...
-Selecting previously unselected package libisc-export160.
-Preparing to unpack .../libisc-export160_1%3a9.10.3.dfsg.P4-8ubuntu1.15_amd64.deb ...
-Unpacking libisc-export160 (1:9.10.3.dfsg.P4-8ubuntu1.15) ...
-Selecting previously unselected package libdns-export162.
-Preparing to unpack .../libdns-export162_1%3a9.10.3.dfsg.P4-8ubuntu1.15_amd64.deb ...
-Unpacking libdns-export162 (1:9.10.3.dfsg.P4-8ubuntu1.15) ...
-Selecting previously unselected package isc-dhcp-client.
-Preparing to unpack .../isc-dhcp-client_4.3.3-5ubuntu12.10_amd64.deb ...
-Unpacking isc-dhcp-client (4.3.3-5ubuntu12.10) ...
-Selecting previously unselected package isc-dhcp-common.
-Preparing to unpack .../isc-dhcp-common_4.3.3-5ubuntu12.10_amd64.deb ...
-Unpacking isc-dhcp-common (4.3.3-5ubuntu12.10) ...
-Selecting previously unselected package libxtables11:amd64.
-Preparing to unpack .../libxtables11_1.6.0-2ubuntu3_amd64.deb ...
-Unpacking libxtables11:amd64 (1.6.0-2ubuntu3) ...
-Selecting previously unselected package netbase.
-Preparing to unpack .../archives/netbase_5.3_all.deb ...
-Unpacking netbase (5.3) ...
-Selecting previously unselected package libxmuu1:amd64.
-Preparing to unpack .../libxmuu1_2%3a1.1.2-2_amd64.deb ...
-Unpacking libxmuu1:amd64 (2:1.1.2-2) ...
-Selecting previously unselected package openssh-client.
-Preparing to unpack .../openssh-client_1%3a7.2p2-4ubuntu2.8_amd64.deb ...
-Unpacking openssh-client (1:7.2p2-4ubuntu2.8) ...
-Selecting previously unselected package xauth.
-Preparing to unpack .../xauth_1%3a1.0.9-1ubuntu2_amd64.deb ...
-Unpacking xauth (1:1.0.9-1ubuntu2) ...
-Selecting previously unselected package ssl-cert.
-Preparing to unpack .../ssl-cert_1.0.37_all.deb ...
-Unpacking ssl-cert (1.0.37) ...
-Selecting previously unselected package postfix.
-Preparing to unpack .../postfix_3.1.0-3ubuntu0.3_amd64.deb ...
-Unpacking postfix (3.1.0-3ubuntu0.3) ...
-Selecting previously unselected package bsd-mailx.
-Preparing to unpack .../bsd-mailx_8.1.2-0.20160123cvs-2_amd64.deb ...
-Unpacking bsd-mailx (8.1.2-0.20160123cvs-2) ...
-Selecting previously unselected package gridengine-common.
-Preparing to unpack .../gridengine-common_6.2u5-7.4_all.deb ...
-Unpacking gridengine-common (6.2u5-7.4) ...
-Selecting previously unselected package gridengine-client.
-Preparing to unpack .../gridengine-client_6.2u5-7.4_amd64.deb ...
-Unpacking gridengine-client (6.2u5-7.4) ...
-Selecting previously unselected package tcsh.
-Preparing to unpack .../tcsh_6.18.01-5_amd64.deb ...
-Unpacking tcsh (6.18.01-5) ...
-Selecting previously unselected package gridengine-exec.
-Preparing to unpack .../gridengine-exec_6.2u5-7.4_amd64.deb ...
-Unpacking gridengine-exec (6.2u5-7.4) ...
-Selecting previously unselected package ncurses-term.
-Preparing to unpack .../ncurses-term_6.0+20160213-1ubuntu1_all.deb ...
-Unpacking ncurses-term (6.0+20160213-1ubuntu1) ...
-Selecting previously unselected package openssh-sftp-server.
-Preparing to unpack .../openssh-sftp-server_1%3a7.2p2-4ubuntu2.8_amd64.deb ...
-Unpacking openssh-sftp-server (1:7.2p2-4ubuntu2.8) ...
-Selecting previously unselected package openssh-server.
-Preparing to unpack .../openssh-server_1%3a7.2p2-4ubuntu2.8_amd64.deb ...
-Unpacking openssh-server (1:7.2p2-4ubuntu2.8) ...
-Selecting previously unselected package python3-pkg-resources.
-Preparing to unpack .../python3-pkg-resources_20.7.0-1_all.deb ...
-Unpacking python3-pkg-resources (20.7.0-1) ...
-Selecting previously unselected package python3-chardet.
-Preparing to unpack .../python3-chardet_2.3.0-2_all.deb ...
-Unpacking python3-chardet (2.3.0-2) ...
-Selecting previously unselected package python3-six.
-Preparing to unpack .../python3-six_1.10.0-3_all.deb ...
-Unpacking python3-six (1.10.0-3) ...
-Selecting previously unselected package python3-urllib3.
-Preparing to unpack .../python3-urllib3_1.13.1-2ubuntu0.16.04.3_all.deb ...
-Unpacking python3-urllib3 (1.13.1-2ubuntu0.16.04.3) ...
-Selecting previously unselected package python3-requests.
-Preparing to unpack .../python3-requests_2.9.1-3ubuntu0.1_all.deb ...
-Unpacking python3-requests (2.9.1-3ubuntu0.1) ...
-Selecting previously unselected package ssh-import-id.
-Preparing to unpack .../ssh-import-id_5.5-0ubuntu1_all.deb ...
-Unpacking ssh-import-id (5.5-0ubuntu1) ...
-Processing triggers for systemd (229-4ubuntu21.22) ...
-Processing triggers for libc-bin (2.23-0ubuntu11) ...
-Setting up libatm1:amd64 (1:2.5.1-1.5) ...
-Setting up libmnl0:amd64 (1.0.3-5) ...
-Setting up liblockfile-bin (1.09-6ubuntu1) ...
-Setting up liblockfile1:amd64 (1.09-6ubuntu1) ...
-Setting up cpio (2.11+dfsg-5ubuntu1) ...
-update-alternatives: using /bin/mt-gnu to provide /bin/mt (mt) in auto mode
-Setting up iproute2 (4.3.0-1ubuntu3.16.04.5) ...
-Setting up ifupdown (0.8.10ubuntu1.4) ...
-Creating /etc/network/interfaces.
-Setting up libisc-export160 (1:9.10.3.dfsg.P4-8ubuntu1.15) ...
-Setting up libdns-export162 (1:9.10.3.dfsg.P4-8ubuntu1.15) ...
-Setting up isc-dhcp-client (4.3.3-5ubuntu12.10) ...
-Setting up isc-dhcp-common (4.3.3-5ubuntu12.10) ...
-Setting up libxtables11:amd64 (1.6.0-2ubuntu3) ...
-Setting up netbase (5.3) ...
-Setting up libxmuu1:amd64 (2:1.1.2-2) ...
-Setting up openssh-client (1:7.2p2-4ubuntu2.8) ...
-Setting up xauth (1:1.0.9-1ubuntu2) ...
-Setting up ssl-cert (1.0.37) ...
-Setting up postfix (3.1.0-3ubuntu0.3) ...
-Creating /etc/postfix/dynamicmaps.cf
-setting myhostname: worker-0.XXX
-setting alias maps
-setting alias database
-changing /etc/mailname to worker-0.XXX
-setting myorigin
-setting destinations: $myhostname, worker-0.XXX, localhost.XXX, , localhost
-setting relayhost:
-setting mynetworks: 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128
-setting mailbox_size_limit: 0
-setting recipient_delimiter: +
-setting inet_interfaces: all
-setting inet_protocols: all
-/etc/aliases does not exist, creating it.
-WARNING: /etc/aliases exists, but does not have a root alias.
-
-Postfix is now set up with a default configuration.  If you need to make
-changes, edit
-/etc/postfix/main.cf (and others) as needed.  To view Postfix configuration
-values, see postconf(1).
-
-After modifying main.cf, be sure to run '/etc/init.d/postfix reload'.
-
-Running newaliases
-invoke-rc.d: could not determine current runlevel
-invoke-rc.d: policy-rc.d denied execution of restart.
-Setting up bsd-mailx (8.1.2-0.20160123cvs-2) ...
-update-alternatives: using /usr/bin/bsd-mailx to provide /usr/bin/mailx (mailx) in auto mode
-Setting up gridengine-common (6.2u5-7.4) ...
-
-Creating config file /etc/default/gridengine with new version
-Setting up gridengine-client (6.2u5-7.4) ...
-Setting up tcsh (6.18.01-5) ...
-update-alternatives: using /bin/tcsh to provide /bin/csh (csh) in auto mode
-Setting up gridengine-exec (6.2u5-7.4) ...
-invoke-rc.d: could not determine current runlevel
-invoke-rc.d: policy-rc.d denied execution of start.
-Setting up ncurses-term (6.0+20160213-1ubuntu1) ...
-Setting up openssh-sftp-server (1:7.2p2-4ubuntu2.8) ...
-Setting up openssh-server (1:7.2p2-4ubuntu2.8) ...
-Creating SSH2 RSA key; this may take some time ...
-2048 SHA256:ok/TxzwtF5W8I55sDxrt4Agy4fuWn39BiSovvDObhVE root@worker-0.XXX (RSA)
-Creating SSH2 DSA key; this may take some time ...
-1024 SHA256:4y48kVYt3mS3q1KgZzEoYMnS/2d/tA8TJUK5uNSaxZY root@worker-0.XXX (DSA)
-Creating SSH2 ECDSA key; this may take some time ...
-256 SHA256:4D7zm4cD2IbDnHoXnzcIo3FISbvOW8eOstGBNf1/bvo root@worker-0.XXX (ECDSA)
-Creating SSH2 ED25519 key; this may take some time ...
-256 SHA256:/HrA3xiZiH5CZkXwtcfE6GwcMM+hEhZzTdFHxj4PzDg root@worker-0.XXX (ED25519)
-invoke-rc.d: could not determine current runlevel
-invoke-rc.d: policy-rc.d denied execution of start.
-Setting up python3-pkg-resources (20.7.0-1) ...
-Setting up python3-chardet (2.3.0-2) ...
-Setting up python3-six (1.10.0-3) ...
-Setting up python3-urllib3 (1.13.1-2ubuntu0.16.04.3) ...
-Setting up python3-requests (2.9.1-3ubuntu0.1) ...
-Setting up ssh-import-id (5.5-0ubuntu1) ...
-Processing triggers for libc-bin (2.23-0ubuntu11) ...
-Processing triggers for systemd (229-4ubuntu21.22) ...
-Reading package lists...
-Building dependency tree...
-Reading state information...
-0 upgraded, 0 newly installed, 0 to remove and 30 not upgraded.
-```
-
-cat $SGE_CFG_PATH/setcfg.log
-```
-please wait
-Start SGE for worker is finished
-done for worker-0.XXX worker.
-```
-
-Sample output of sge:
-![alt text](/img/sge/sge_cluster.png "SGE for multiple jobs")
-
-![alt text](/img/sge/sge_stat.png "SGE for user multiple jobs")
diff --git a/website/docs/ecosystem/kaldi/WriteDockerfileKaldi.md b/website/docs/ecosystem/kaldi/WriteDockerfileKaldi.md
deleted file mode 100644
index 8e0449b..0000000
--- a/website/docs/ecosystem/kaldi/WriteDockerfileKaldi.md
+++ /dev/null
@@ -1,112 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-   http://www.apache.org/licenses/LICENSE-2.0
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-## Creating Docker Images for Running Kaldi on YARN
-
-### How to create docker images to run Kaldi on YARN
-
-A Dockerfile to run Kaldi on YARN needs two parts:
-
-**Base libraries which Kaldi depends on**
-
-1) OS base image, for example ```nvidia/cuda:10.0-cudnn7-devel-ubuntu18.04```
-
-2) Libraries and packages that Kaldi depends on, for example ```python```, ```g++```, ```make```. For GPU support, you also need ```cuda```, ```cudnn```, etc.
-
-3) The Kaldi build itself.
-
-**Libraries to access HDFS**
-
-1) JDK
-
-2) Hadoop
-
-Here's an example of a base image (with GPU support) to install Kaldi:
-```shell
-FROM nvidia/cuda:10.0-cudnn7-devel-ubuntu18.04
-
-RUN apt-get clean && \
-    apt-get update && \
-    apt-get install -y --no-install-recommends \
-        sudo \
-        openjdk-8-jdk \
-        iputils-ping \
-        g++ \
-        make \
-        automake \
-        autoconf \
-        bzip2 \
-        unzip \
-        wget \
-        sox \
-        libtool \
-        git \
-        subversion \
-        python2.7 \
-        python3 \
-        zlib1g-dev \
-        ca-certificates \
-        patch \
-        ffmpeg \
-        vim && \
-        rm -rf /var/lib/apt/lists/* && \
-        ln -s /usr/bin/python2.7 /usr/bin/python
-
-RUN git clone --depth 1 https://github.com/kaldi-asr/kaldi.git /opt/kaldi && \
-    cd /opt/kaldi && \
-    cd /opt/kaldi/tools && \
-    ./extras/install_mkl.sh && \
-    make -j $(nproc) && \
-    cd /opt/kaldi/src && \
-    ./configure --shared --use-cuda && \
-    make depend -j $(nproc) && \
-    make -j $(nproc)
-```
-
-On top of the above image, add files and install packages to access HDFS:
-```shell
-RUN apt-get update && apt-get install -y openjdk-8-jdk wget
-# Install hadoop
-ENV HADOOP_VERSION="3.2.1"
-ENV JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
-RUN wget https://archive.apache.org/dist/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}.tar.gz && \
-    tar zxf hadoop-${HADOOP_VERSION}.tar.gz && \
-    ln -s hadoop-${HADOOP_VERSION} hadoop-current && \
-    rm hadoop-${HADOOP_VERSION}.tar.gz
-```
-
-Build and push to your own docker registry: Use ```docker build ... ``` and ```docker push ...``` to finish this step.
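-
-A minimal sketch of this step (assuming a hypothetical registry address `registry.example.com`; replace it with your own):
-```shell
-# registry.example.com is a placeholder for your own Docker registry
-docker build -t kaldi-latest-gpu-base:0.0.1 -f Dockerfile.gpu.kaldi_latest .
-docker tag kaldi-latest-gpu-base:0.0.1 registry.example.com/kaldi-latest-gpu-base:0.0.1
-docker push registry.example.com/kaldi-latest-gpu-base:0.0.1
-```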
-
-### Use examples to build your own Kaldi docker images
-
-We provide the following example for you to build Kaldi Docker images.
-
-For the latest Kaldi:
-
-- *base/ubuntu-18.04/Dockerfile.gpu.kaldi_latest*: Latest Kaldi that supports GPU, which is prebuilt to CUDA10, with models.
-
-### Build Docker images
-
-#### Manually build Docker image:
-
-Under the `docker/` directory, run `build-all.sh` to build the Docker images (the `CLUSTER_NAME` build argument in `build-all.sh` can be modified so that your cluster user is granted installation permissions; see the sketch below). It will build the following image:
-
-- `kaldi-latest-gpu-base:0.0.1`: base Docker image which includes Hadoop, Kaldi, GPU base libraries, and the thchs30 model.
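-
-For example, to build with a different cluster user, the `docker build` command in `build-all.sh` can be adjusted along these lines (a sketch; `your-cluster-user` is a placeholder):
-```shell
-# your-cluster-user is a placeholder for the cluster user that should get installation permissions
-docker build --build-arg CLUSTER_NAME=your-cluster-user . -f Dockerfile.gpu.kaldi_latest -t kaldi-latest-gpu-base:0.0.1
-```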
-
-#### Use prebuilt images
-
-(No liability)
-For convenience, you can also use prebuilt images from Docker Hub:
-- hadoopsubmarine/kaldi-latest-gpu-base:0.0.1
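-
-For example, to pull the prebuilt image:
-```shell
-docker pull hadoopsubmarine/kaldi-latest-gpu-base:0.0.1
-```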
diff --git a/website/docs/ecosystem/kaldi/base/ubuntu-18.04/Dockerfile.gpu.kaldi_latest b/website/docs/ecosystem/kaldi/base/ubuntu-18.04/Dockerfile.gpu.kaldi_latest
deleted file mode 100644
index 44e2ae2..0000000
--- a/website/docs/ecosystem/kaldi/base/ubuntu-18.04/Dockerfile.gpu.kaldi_latest
+++ /dev/null
@@ -1,74 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM nvidia/cuda:10.0-cudnn7-devel-ubuntu18.04
-
-RUN apt-get clean && \
-    apt-get update && \
-    apt-get install -y --no-install-recommends \
-        sudo \
-        openjdk-8-jdk \
-        iputils-ping \
-        g++ \
-        make \
-        automake \
-        autoconf \
-        bzip2 \
-        unzip \
-        wget \
-        sox \
-        libtool \
-        git \
-        subversion \
-        python2.7 \
-        python3 \
-        zlib1g-dev \
-        ca-certificates \
-        patch \
-        ffmpeg \
-        vim && \
-        rm -rf /var/lib/apt/lists/* && \
-        ln -s /usr/bin/python2.7 /usr/bin/python
-
-RUN git clone --depth 1 https://github.com/kaldi-asr/kaldi.git /opt/kaldi && \
-    cd /opt/kaldi && \
-    cd /opt/kaldi/tools && \
-    ./extras/install_mkl.sh && \
-    make -j $(nproc) && \
-    cd /opt/kaldi/src && \
-    ./configure --shared --use-cuda && \
-    make depend -j $(nproc) && \
-    make -j $(nproc)
-
-WORKDIR /
-# Install Hadoop
-ENV HADOOP_VERSION="3.2.1"
-ENV JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
-ARG CLUSTER_NAME=submarine
-RUN wget https://archive.apache.org/dist/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}.tar.gz && \
-    tar zxf hadoop-${HADOOP_VERSION}.tar.gz && \
-    ln -s hadoop-${HADOOP_VERSION} hadoop-current && \
-    rm hadoop-${HADOOP_VERSION}.tar.gz
-
-RUN echo "Install python related packages" && \
-    pip --no-cache-dir install Pillow h5py ipykernel jupyter matplotlib numpy pandas scipy sklearn && \
-    python -m ipykernel.kernelspec
-
-# CLUSTER_NAME is the cluster user that is granted root privileges
-ENV CLUSTER_NAME="admin"
-RUN echo "## Allow root to run any commands anywhere" >> /etc/sudoers && \
-    echo "User_Alias   USER_ROOT   = tf-docker, $CLUSTER_NAME" >> /etc/sudoers && \
-    echo "root        ALL=(ALL)    ALL" >> /etc/sudoers && \
-    echo "USER_ROOT   ALL=(ALL)    ALL" >> /etc/sudoers && \
-    echo "USER_ROOT   ALL=(ALL)    NOPASSWD: ALL" >> /etc/sudoers
\ No newline at end of file
diff --git a/website/docs/ecosystem/kaldi/build-all.sh b/website/docs/ecosystem/kaldi/build-all.sh
deleted file mode 100755
index 61c590b..0000000
--- a/website/docs/ecosystem/kaldi/build-all.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-echo "Building base images"
-
-set -euo pipefail
-
-cd base/ubuntu-18.04
-
-docker build --build-arg CLUSTER_NAME=test . -f Dockerfile.gpu.kaldi_latest -t kaldi-latest-gpu-base:0.0.1
-
-echo "Finished building base images"
diff --git a/website/docs/ecosystem/kaldi/sge/gencfs.sh b/website/docs/ecosystem/kaldi/sge/gencfs.sh
deleted file mode 100644
index 4e5482d..0000000
--- a/website/docs/ecosystem/kaldi/sge/gencfs.sh
+++ /dev/null
@@ -1,154 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-WORKER_HOST_STRS=$1
-SLOTS=$2
-
-cat << EOF  > /tmp/qconf-mc.txt
-arch                a           RESTRING    ==    YES         NO         NONE     0
-calendar            c           RESTRING    ==    YES         NO         NONE     0
-cpu                 cpu         DOUBLE      >=    YES         NO         0        0
-display_win_gui     dwg         BOOL        ==    YES         NO         0        0
-gpu                 g           INT         <=    YES         YES        0        10000
-h_core              h_core      MEMORY      <=    YES         NO         0        0
-h_cpu               h_cpu       TIME        <=    YES         NO         0:0:0    0
-h_data              h_data      MEMORY      <=    YES         NO         0        0
-h_fsize             h_fsize     MEMORY      <=    YES         NO         0        0
-h_rss               h_rss       MEMORY      <=    YES         NO         0        0
-h_rt                h_rt        TIME        <=    YES         NO         0:0:0    0
-h_stack             h_stack     MEMORY      <=    YES         NO         0        0
-h_vmem              h_vmem      MEMORY      <=    YES         NO         0        0
-hostname            h           HOST        ==    YES         NO         NONE     0
-load_avg            la          DOUBLE      >=    NO          NO         0        0
-load_long           ll          DOUBLE      >=    NO          NO         0        0
-load_medium         lm          DOUBLE      >=    NO          NO         0        0
-load_short          ls          DOUBLE      >=    NO          NO         0        0
-m_core              core        INT         <=    YES         NO         0        0
-m_socket            socket      INT         <=    YES         NO         0        0
-m_topology          topo        RESTRING    ==    YES         NO         NONE     0
-m_topology_inuse    utopo       RESTRING    ==    YES         NO         NONE     0
-mem_free            mf          MEMORY      <=    YES         NO         0        0
-mem_total           mt          MEMORY      <=    YES         NO         0        0
-mem_used            mu          MEMORY      >=    YES         NO         0        0
-min_cpu_interval    mci         TIME        <=    NO          NO         0:0:0    0
-np_load_avg         nla         DOUBLE      >=    NO          NO         0        0
-np_load_long        nll         DOUBLE      >=    NO          NO         0        0
-np_load_medium      nlm         DOUBLE      >=    NO          NO         0        0
-np_load_short       nls         DOUBLE      >=    NO          NO         0        0
-num_proc            p           INT         ==    YES         NO         0        0
-qname               q           RESTRING    ==    YES         NO         NONE     0
-ram_free            ram_free    MEMORY      <=    YES         JOB        0        0
-rerun               re          BOOL        ==    NO          NO         0        0
-s_core              s_core      MEMORY      <=    YES         NO         0        0
-s_cpu               s_cpu       TIME        <=    YES         NO         0:0:0    0
-s_data              s_data      MEMORY      <=    YES         NO         0        0
-s_fsize             s_fsize     MEMORY      <=    YES         NO         0        0
-s_rss               s_rss       MEMORY      <=    YES         NO         0        0
-s_rt                s_rt        TIME        <=    YES         NO         0:0:0    0
-s_stack             s_stack     MEMORY      <=    YES         NO         0        0
-s_vmem              s_vmem      MEMORY      <=    YES         NO         0        0
-seq_no              seq         INT         ==    NO          NO         0        0
-slots               s           INT         <=    YES         YES        1        1000
-swap_free           sf          MEMORY      <=    YES         NO         0        0
-swap_rate           sr          MEMORY      >=    YES         NO         0        0
-swap_rsvd           srsv        MEMORY      >=    YES         NO         0        0
-swap_total          st          MEMORY      <=    YES         NO         0        0
-swap_used           su          MEMORY      >=    YES         NO         0        0
-tmpdir              tmp         RESTRING    ==    NO          NO         NONE     0
-virtual_free        vf          MEMORY      <=    YES         NO         0        0
-virtual_total       vt          MEMORY      <=    YES         NO         0        0
-virtual_used        vu          MEMORY      >=    YES         NO         0        0
-EOF
-
-cat << EOF  > /tmp/qconf-ae.txt
-hostname              $(hostname)
-load_scaling          NONE
-complex_values        ram_free=160G,gpu=2
-user_lists            NONE
-xuser_lists           NONE
-projects              NONE
-xprojects             NONE
-usage_scaling         NONE
-report_variables      NONE
-EOF
-
-cat << EOS  > /tmp/qconf-ap.txt
-pe_name            smp
-slots              32
-user_lists         NONE
-xuser_lists        NONE
-start_proc_args    /bin/true
-stop_proc_args     /bin/true
-allocation_rule    \$pe_slots
-control_slaves     FALSE
-job_is_first_task  TRUE
-urgency_slots      min
-accounting_summary FALSE
-EOS
-
-cat << EOF > /tmp/qconf-aq.txt
-qname                 all.q
-hostlist              $WORKER_HOST_STRS
-seq_no                0
-load_thresholds       np_load_avg=1.75
-suspend_thresholds    NONE
-nsuspend              1
-suspend_interval      00:05:00
-priority              0
-min_cpu_interval      00:05:00
-processors            UNDEFINED
-qtype                 BATCH INTERACTIVE
-ckpt_list             NONE
-pe_list               make smp
-rerun                 FALSE
-slots                 $SLOTS
-tmpdir                /tmp
-shell                 /bin/bash
-prolog                NONE
-epilog                NONE
-shell_start_mode      posix_compliant
-starter_method        NONE
-suspend_method        NONE
-resume_method         NONE
-terminate_method      NONE
-notify                00:00:60
-owner_list            NONE
-user_lists            NONE
-xuser_lists           NONE
-subordinate_list      NONE
-complex_values        NONE
-projects              NONE
-xprojects             NONE
-calendar              NONE
-initial_state         default
-s_rt                  INFINITY
-h_rt                  INFINITY
-s_cpu                 INFINITY
-h_cpu                 INFINITY
-s_fsize               INFINITY
-h_fsize               INFINITY
-s_data                INFINITY
-h_data                INFINITY
-s_stack               INFINITY
-h_stack               INFINITY
-s_core                INFINITY
-h_core                INFINITY
-s_rss                 INFINITY
-h_rss                 INFINITY
-s_vmem                INFINITY
-h_vmem                INFINITY
-EOF
diff --git a/website/docs/ecosystem/kaldi/sge/group b/website/docs/ecosystem/kaldi/sge/group
deleted file mode 100644
index 88c6ba8..0000000
--- a/website/docs/ecosystem/kaldi/sge/group
+++ /dev/null
@@ -1,66 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-root:x:0:
-daemon:x:1:
-bin:x:2:
-sys:x:3:
-adm:x:4:
-tty:x:5:
-disk:x:6:
-lp:x:7:
-mail:x:8:
-news:x:9:
-uucp:x:10:
-man:x:12:
-proxy:x:13:
-kmem:x:15:
-dialout:x:20:
-fax:x:21:
-voice:x:22:
-cdrom:x:24:
-floppy:x:25:
-tape:x:26:
-sudo:x:27:
-audio:x:29:
-dip:x:30:
-www-data:x:33:
-backup:x:34:
-operator:x:37:
-list:x:38:
-irc:x:39:
-src:x:40:
-gnats:x:41:
-shadow:x:42:
-utmp:x:43:
-video:x:44:
-sasl:x:45:
-plugdev:x:46:
-staff:x:50:
-games:x:60:
-users:x:100:
-nogroup:x:65534:
-systemd-journal:x:101:
-systemd-timesync:x:102:
-systemd-network:x:103:
-systemd-resolve:x:104:
-systemd-bus-proxy:x:106:
-etcd:x:1099:
-netdev:x:105:
-ssh:x:107:
-ssl-cert:x:108:
-postfix:x:109:
-postdrop:x:110:
-sgeadmin:x:111:
diff --git a/website/docs/ecosystem/kaldi/sge/passwd b/website/docs/ecosystem/kaldi/sge/passwd
deleted file mode 100644
index a39047d..0000000
--- a/website/docs/ecosystem/kaldi/sge/passwd
+++ /dev/null
@@ -1,42 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-root:x:0:0:root:/root:/bin/bash
-daemon:x:1:1:daemon:/usr/sbin:/usr/sbin/nologin
-bin:x:2:2:bin:/bin:/usr/sbin/nologin
-sys:x:3:3:sys:/dev:/usr/sbin/nologin
-sync:x:4:65534:sync:/bin:/bin/sync
-games:x:5:60:games:/usr/games:/usr/sbin/nologin
-man:x:6:12:man:/var/cache/man:/usr/sbin/nologin
-lp:x:7:7:lp:/var/spool/lpd:/usr/sbin/nologin
-mail:x:8:8:mail:/var/mail:/usr/sbin/nologin
-news:x:9:9:news:/var/spool/news:/usr/sbin/nologin
-uucp:x:10:10:uucp:/var/spool/uucp:/usr/sbin/nologin
-proxy:x:13:13:proxy:/bin:/usr/sbin/nologin
-www-data:x:33:33:www-data:/var/www:/usr/sbin/nologin
-backup:x:34:34:backup:/var/backups:/usr/sbin/nologin
-list:x:38:38:Mailing List Manager:/var/list:/usr/sbin/nologin
-irc:x:39:39:ircd:/var/run/ircd:/usr/sbin/nologin
-gnats:x:41:41:Gnats Bug-Reporting System (admin):/var/lib/gnats:/usr/sbin/nologin
-nobody:x:65534:65534:nobody:/nonexistent:/usr/sbin/nologin
-systemd-timesync:x:100:102:systemd Time Synchronization,,,:/run/systemd:/bin/false
-systemd-network:x:101:103:systemd Network Management,,,:/run/systemd/netif:/bin/false
-systemd-resolve:x:102:104:systemd Resolver,,,:/run/systemd/resolve:/bin/false
-systemd-bus-proxy:x:103:105:systemd Bus Proxy,,,:/run/systemd:/bin/false
-_apt:x:104:65534::/nonexistent:/bin/false
-etcd:x:1099:1099::/home/etcd:
-postfix:x:105:109::/var/spool/postfix:/bin/false
-sgeadmin:x:106:111::/var/lib/gridengine:/bin/false
-sshd:x:107:65534::/var/run/sshd:/usr/sbin/nologin
diff --git a/website/docs/ecosystem/kaldi/sge/resolv.conf b/website/docs/ecosystem/kaldi/sge/resolv.conf
deleted file mode 100644
index e51b279..0000000
--- a/website/docs/ecosystem/kaldi/sge/resolv.conf
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Resolver configuration for the SGE containers.
-# Point the second nameserver at your YARN Registry DNS server.
-
-nameserver 127.0.0.11
-nameserver <your RegistryDNS IP>
-options timeout:1 ndots:0
diff --git a/website/docs/ecosystem/kaldi/sge/sge_run.sh b/website/docs/ecosystem/kaldi/sge/sge_run.sh
deleted file mode 100644
index d2076ee..0000000
--- a/website/docs/ecosystem/kaldi/sge/sge_run.sh
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Execute command format: sge_run.sh %num_workers% %cfg_path%
-if [ $# -ne 2 ] 
-then
-    echo "Parameter setting error!"
-    exit 1
-fi
-
-WORKER_NUMBER=$1
-CFG_PATH=$2
-
-HOST_NAME=$(hostname)
-JOB_NAME=`echo $HOST_NAME |awk -F "." '{print($2)}'`
-CLUSTER_USER=`echo $HOST_NAME |awk -F "." '{print($3)}'`
-DOMAIN_SUFFIX=`echo $HOST_NAME |awk -F "." '{for(i=4;i<=NF;i++){if(tostr == ""){tostr=$i}else{tostr=tostr"."$i};if(i==NF)print tostr}}'`
-
-MASTER_HOST=""
-WORKER_HOST_STRS=""
-declare -a WORKER_HOST_LIST
-SLOTS="30"
-
-for ((num=0; num<$WORKER_NUMBER; num++))
-do
-   if [ $num -eq 0 ]
-   then
-       MASTER_HOST="master-0."$JOB_NAME"."$CLUSTER_USER"."$DOMAIN_SUFFIX
-       WORKER_HOST_STRS="master-0."$JOB_NAME"."$CLUSTER_USER"."$DOMAIN_SUFFIX
-       SLOTS=$SLOTS",[master-0."$JOB_NAME"."$CLUSTER_USER"."$DOMAIN_SUFFIX"=48]"
-   else
-       let tmpnum=$num-1
-       WORKER_HOST_STRS=$WORKER_HOST_STRS",worker-"$tmpnum"."$JOB_NAME"."$CLUSTER_USER"."$DOMAIN_SUFFIX
-       WORKER_HOST_LIST+=("worker-"$tmpnum"."$JOB_NAME"."$CLUSTER_USER"."$DOMAIN_SUFFIX)
-       SLOTS=$SLOTS",[worker-"$tmpnum"."$JOB_NAME"."$CLUSTER_USER"."$DOMAIN_SUFFIX"=48]"
-    fi
-done
-
-if [ $(echo $HOST_NAME |grep "^master-") ]
-then
-    sudo su - -c "sleep 30s && export DEBIAN_FRONTEND=noninteractive && apt-get update && \
-    apt-get install -y gridengine-master gridengine-exec gridengine-client;apt-get autoremove -y && apt-get clean && \
-    /etc/init.d/gridengine-master start && /etc/init.d/gridengine-exec start"
-
-    sudo su - -s /bin/bash -c ". ${CFG_PATH}/gencfs.sh $WORKER_HOST_STRS $SLOTS"
-    sudo su -c " qconf -Mc /tmp/qconf-mc.txt && qconf -Ae /tmp/qconf-ae.txt && qconf -as \`hostname\` && 
-    qconf -Ap /tmp/qconf-ap.txt && qconf -Aq /tmp/qconf-aq.txt && qconf -am $CLUSTER_USER && 
-    echo finish master >> ${CFG_PATH}/setcfg.log "
-    for worker_num in ${WORKER_HOST_LIST[@]}
-    do
-        echo  add $worker_num
-        sudo su -c " qconf -ah $worker_num && echo add worker node $worker_num >> ${CFG_PATH}/setcfg.log "
-    done
-
-elif [ $(echo $HOST_NAME |grep "^worker-") ]
-then
-    sudo su - -s /bin/bash -c "sleep 2m && echo please wait && echo please wait >> ${CFG_PATH}/setcfg.log"
-    sudo su - -c "export DEBIAN_FRONTEND=noninteractive &&  apt-get update && \
-    apt-get install -y gridengine-client gridengine-exec; apt-get autoremove -y && apt-get clean"
-    sudo su - -c "echo $MASTER_HOST > /var/lib/gridengine/default/common/act_qmaster"
-    sudo su - -c "/etc/init.d/gridengine-exec start && echo Start SGE for worker is finished >> ${CFG_PATH}/setcfg.log"
-
-    sudo su - -s /bin/bash -c ". ${CFG_PATH}/gencfs.sh $WORKER_HOST_STRS $SLOTS"
-    sudo su - -c "qconf -Me /tmp/qconf-ae.txt && echo done for $HOST_NAME worker. >> ${CFG_PATH}/setcfg.log"
-
-else
-    echo "hostname doesn't match! should start with master or worker!" 1>&2 exit 1
-fi
-
-#sudo su - -s /bin/bash -c "echo sge start to finish >> ${CFG_PATH}/setcfg.log"
diff --git a/website/docs/userDocs/yarn/Dockerfiles.md b/website/docs/userDocs/yarn/Dockerfiles.md
deleted file mode 100644
index f342533..0000000
--- a/website/docs/userDocs/yarn/Dockerfiles.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-title: Write Dockerfiles for Submarine
----
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-   http://www.apache.org/licenses/LICENSE-2.0
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-[How to write Dockerfile for Submarine TensorFlow jobs](WriteDockerfileTF)
-
-[How to write Dockerfile for Submarine PyTorch jobs](WriteDockerfilePT)
-
-[How to write Dockerfile for Submarine MXNet jobs](WriteDockerfileMX)
diff --git a/website/docs/userDocs/yarn/TestAndTroubleshooting.md b/website/docs/userDocs/yarn/TestAndTroubleshooting.md
deleted file mode 100644
index 8b6ed5c..0000000
--- a/website/docs/userDocs/yarn/TestAndTroubleshooting.md
+++ /dev/null
@@ -1,151 +0,0 @@
----
-title: Test and Troubleshooting
----
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-   http://www.apache.org/licenses/LICENSE-2.0
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-## Test with a TensorFlow job
-
-Distributed-shell + GPU + cgroup
-
-```bash
- ... \
- job run \
- --env DOCKER_JAVA_HOME=/opt/java \
- --env DOCKER_HADOOP_HDFS_HOME=/hadoop-current --name distributed-tf-gpu \
- --env YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_NETWORK=calico-network \
- --worker_docker_image tf-1.13.1-gpu:0.0.1 \
- --ps_docker_image tf-1.13.1-cpu:0.0.1 \
- --input_path hdfs://${dfs_name_service}/tmp/cifar-10-data \
- --checkpoint_path hdfs://${dfs_name_service}/user/hadoop/tf-distributed-checkpoint \
- --num_ps 0 \
- --ps_resources memory=4G,vcores=2,gpu=0 \
- --ps_launch_cmd "python /test/cifar10_estimator/cifar10_main.py --data-dir=hdfs://${dfs_name_service}/tmp/cifar-10-data --job-dir=hdfs://${dfs_name_service}/tmp/cifar-10-jobdir --num-gpus=0" \
- --worker_resources memory=4G,vcores=2,gpu=1 --verbose \
- --num_workers 1 \
- --worker_launch_cmd "python /test/cifar10_estimator/cifar10_main.py --data-dir=hdfs://${dfs_name_service}/tmp/cifar-10-data --job-dir=hdfs://${dfs_name_service}/tmp/cifar-10-jobdir --train-steps=500 --eval-batch-size=16 --train-batch-size=16 --sync --num-gpus=1"
-```
-
-
-
-## Issues:
-
-### Issue 1: Failure to start NodeManager after system reboot
-
-```
-2018-09-20 18:54:39,785 ERROR org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor: Failed to bootstrap configured resource subsystems!
-org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerException: Unexpected: Cannot create yarn cgroup Subsystem:cpu Mount points:/proc/mounts User:yarn Path:/sys/fs/cgroup/cpu,cpuacct/hadoop-yarn
-  at org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandlerImpl.initializePreMountedCGroupController(CGroupsHandlerImpl.java:425)
-  at org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandlerImpl.initializeCGroupController(CGroupsHandlerImpl.java:377)
-  at org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsCpuResourceHandlerImpl.bootstrap(CGroupsCpuResourceHandlerImpl.java:98)
-  at org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsCpuResourceHandlerImpl.bootstrap(CGroupsCpuResourceHandlerImpl.java:87)
-  at org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerChain.bootstrap(ResourceHandlerChain.java:58)
-  at org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor.init(LinuxContainerExecutor.java:320)
-  at org.apache.hadoop.yarn.server.nodemanager.NodeManager.serviceInit(NodeManager.java:389)
-  at org.apache.hadoop.service.AbstractService.init(AbstractService.java:164)
-  at org.apache.hadoop.yarn.server.nodemanager.NodeManager.initAndStartNodeManager(NodeManager.java:929)
-  at org.apache.hadoop.yarn.server.nodemanager.NodeManager.main(NodeManager.java:997)
-2018-09-20 18:54:39,789 INFO org.apache.hadoop.service.AbstractService: Service NodeManager failed in state INITED
-```
-
-Solution: Grant the user `yarn` access to `/sys/fs/cgroup/cpu,cpuacct`, which is a subfolder of the cgroup mount destination.
-
-```
-chown :yarn -R /sys/fs/cgroup/cpu,cpuacct
-chmod g+rwx -R /sys/fs/cgroup/cpu,cpuacct
-```
-
-If GPUs are used, access to the cgroup devices folder is needed as well:
-
-```
-chown :yarn -R /sys/fs/cgroup/devices
-chmod g+rwx -R /sys/fs/cgroup/devices
-```
-
-
-### Issue 2: container-executor permission denied
-
-```
-2018-09-21 09:36:26,102 WARN org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor: IOException executing command:
-java.io.IOException: Cannot run program "/etc/yarn/sbin/Linux-amd64-64/container-executor": error=13, Permission denied
-        at java.lang.ProcessBuilder.start(ProcessBuilder.java:1048)
-        at org.apache.hadoop.util.Shell.runCommand(Shell.java:938)
-        at org.apache.hadoop.util.Shell.run(Shell.java:901)
-        at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1213)
-```
-
-Solution: The permission of `/etc/yarn/sbin/Linux-amd64-64/container-executor` should be 6050
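-
-For example (a sketch, assuming the NodeManager runs with group `yarn`; adjust the owner and group to your deployment):
-
-```
-# adjust root:yarn to the user/group your NodeManager actually uses
-chown root:yarn /etc/yarn/sbin/Linux-amd64-64/container-executor
-chmod 6050 /etc/yarn/sbin/Linux-amd64-64/container-executor
-```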
-
-### Issue 3: How to get the Docker service log
-
-Solution: we can get the Docker service log with the following command:
-
-```
-journalctl -u docker
-```
-
-### Issue 4: Docker can't remove containers, failing with errors like `device or resource busy`
-
-```bash
-$ docker rm 0bfafa146431
-Error response from daemon: Unable to remove filesystem for 0bfafa146431771f6024dcb9775ef47f170edb2f1852f71916ba44209ca6120a: remove /app/docker/containers/0bfafa146431771f6024dcb9775ef47f170edb2f152f71916ba44209ca6120a/shm: device or resource busy
-```
-
-Solution: to find which process causes the `device or resource busy` error, we can add a shell script named `find-busy-mnt.sh`:
-
-```bash
-#!/usr/bin/env bash
-
-# A simple script to get information about mount points and pids and their
-# mount namespaces.
-
-if [ $# -ne 1 ]; then
-  echo "Usage: $0 <devicemapper-device-id>"
-  exit 1
-fi
-
-ID=$1
-
-MOUNTS=`find /proc/*/mounts | xargs grep $ID 2>/dev/null`
-
-[ -z "$MOUNTS" ] && echo "No pids found" && exit 0
-
-printf "PID\tNAME\t\tMNTNS\n"
-echo "$MOUNTS" | while read LINE; do
-  PID=`echo $LINE | cut -d ":" -f1 | cut -d "/" -f3`
-  # Ignore self and thread-self
-  if [ "$PID" == "self" ] || [ "$PID" == "thread-self" ]; then
-    continue
-  fi
-  NAME=`ps -q $PID -o comm=`
-  MNTNS=`readlink /proc/$PID/ns/mnt`
-  printf "%s\t%s\t\t%s\n" "$PID" "$NAME" "$MNTNS"
-done
-```
-
-Kill the process by the PID found by the script:
-
-```bash
-$ chmod +x find-busy-mnt.sh
-./find-busy-mnt.sh 0bfafa146431771f6024dcb9775ef47f170edb2f152f71916ba44209ca6120a
-# PID   NAME            MNTNS
-# 5007  ntpd            mnt:[4026533598]
-$ kill -9 5007
-```
-
-### Issue 5: YARN fails to start containers
-
-If the number of GPUs required by applications is larger than the number of GPUs in the cluster, some containers cannot be created.
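-
-To check how many GPUs the cluster actually exposes before submitting a job, you can query the ResourceManager REST API (a sketch, assuming the RM web address is `<rm-host>:8088`); each node entry reports its used and available resources, which include the `yarn.io/gpu` resource when GPU scheduling is enabled:
-
-```bash
-# replace <rm-host> with your ResourceManager host
-curl http://<rm-host>:8088/ws/v1/cluster/nodes
-```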
diff --git a/website/docs/userDocs/yarn/WriteDockerfileMX.md b/website/docs/userDocs/yarn/WriteDockerfileMX.md
deleted file mode 100644
index fbe5901..0000000
--- a/website/docs/userDocs/yarn/WriteDockerfileMX.md
+++ /dev/null
@@ -1,90 +0,0 @@
----
-title: Docker Images for MXNet
----
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-   http://www.apache.org/licenses/LICENSE-2.0
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-## How to create docker images to run MXNet on YARN
-
-Dockerfile to run MXNet on YARN needs two parts:
-
-**Base libraries which MXNet depends on**
-
-1) OS base image, for example ```ubuntu:18.04```
-
-2) MXNet dependent libraries and packages. \
-   For example ```python```, ```scipy```. For GPU support, you also need ```cuda```, ```cudnn```, etc.
-
-3) MXNet package.
-
-**Libraries to access HDFS**
-
-1) JDK
-
-2) Hadoop
-
-Here's an example of a base image (without GPU support) to install MXNet:
-```shell
-FROM ubuntu:18.04
-
-# Install some development tools and packages
-# MXNet 1.6 is going to be the last MXNet release to support Python2
-RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y tzdata git \
-    wget zip python3 python3-pip python3-distutils libgomp1 libopenblas-dev libopencv-dev
-
-# Install latest MXNet using pip (without GPU support)
-RUN pip3 install mxnet
-
-RUN echo "Install python related packages" && \
-    pip3 install --user graphviz==0.8.4 ipykernel jupyter matplotlib numpy pandas scipy sklearn  && \
-    python3 -m ipykernel.kernelspec
-```
-
-On top of the above image, add files and install packages to access HDFS:
-```shell
-ENV JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
-RUN apt-get update && apt-get install -y openjdk-8-jdk wget
-
-# Install hadoop
-ENV HADOOP_VERSION="3.1.2"
-RUN wget https://archive.apache.org/dist/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}.tar.gz
-# If you are in mainland China, you can use the following command.
-# RUN wget http://mirrors.hust.edu.cn/apache/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}.tar.gz
-
-RUN tar zxf hadoop-${HADOOP_VERSION}.tar.gz
-RUN ln -s hadoop-${HADOOP_VERSION} hadoop-current
-RUN rm hadoop-${HADOOP_VERSION}.tar.gz
-```
-
-Build and push to your own docker registry: Use ```docker build ... ``` and ```docker push ...``` to finish this step.
-
-## Use examples to build your own MXNet docker images
-
-We provide some example Dockerfiles for you to build your own MXNet Docker images.
-
-For latest MXNet
-
-- *docker/mxnet/base/ubuntu-18.04/Dockerfile.cpu.mxnet_latest*: Latest MXNet that supports CPU
-- *docker/mxnet/base/ubuntu-18.04/Dockerfile.gpu.mxnet_latest*: Latest MXNet that supports GPU, which is prebuilt to CUDA10.
-
-## Build Docker images
-
-### Manually build Docker image:
-
-Under the `docker/mxnet` directory, run `build-all.sh` to build all Docker images. This command will build the following Docker images:
-
-- `mxnet-latest-cpu-base:0.0.1`: base Docker image which includes Hadoop and MXNet
-- `mxnet-latest-gpu-base:0.0.1`: base Docker image which includes Hadoop, MXNet, and GPU base libraries.
diff --git a/website/docs/userDocs/yarn/WriteDockerfilePT.md b/website/docs/userDocs/yarn/WriteDockerfilePT.md
deleted file mode 100644
index 31f74f9..0000000
--- a/website/docs/userDocs/yarn/WriteDockerfilePT.md
+++ /dev/null
@@ -1,114 +0,0 @@
----
-title: Docker Images for PyTorch
----
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-   http://www.apache.org/licenses/LICENSE-2.0
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-## How to create docker images to run PyTorch on YARN
-
-Dockerfile to run PyTorch on YARN needs two parts:
-
-**Base libraries which PyTorch depends on**
-
-1) OS base image, for example ```ubuntu:18.04```
-
-2) PyTorch dependent libraries and packages. For example ```python```, ```scipy```. For GPU support, you also need ```cuda```, ```cudnn```, etc.
-
-3) PyTorch package.
-
-**Libraries to access HDFS**
-
-1) JDK
-
-2) Hadoop
-
-Here's an example of a base image (with GPU support) to install PyTorch:
-```shell
-FROM nvidia/cuda:10.0-cudnn7-devel-ubuntu18.04
-ARG PYTHON_VERSION=3.6
-RUN apt-get update && apt-get install -y --no-install-recommends \
-         build-essential \
-         cmake \
-         git \
-         curl \
-         vim \
-         ca-certificates \
-         libjpeg-dev \
-         libpng-dev \
-         wget &&\
-     rm -rf /var/lib/apt/lists/*
-
-
-RUN curl -o ~/miniconda.sh -O  https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh  && \
-     chmod +x ~/miniconda.sh && \
-     ~/miniconda.sh -b -p /opt/conda && \
-     rm ~/miniconda.sh && \
-     /opt/conda/bin/conda install -y python=$PYTHON_VERSION numpy pyyaml scipy ipython mkl mkl-include cython typing && \
-     /opt/conda/bin/conda install -y -c pytorch magma-cuda100 && \
-     /opt/conda/bin/conda clean -ya
-ENV PATH /opt/conda/bin:$PATH
-RUN pip install ninja
-# This must be done before pip so that requirements.txt is available
-WORKDIR /opt/pytorch
-RUN git clone https://github.com/pytorch/pytorch.git
-WORKDIR pytorch
-RUN git submodule update --init
-RUN TORCH_CUDA_ARCH_LIST="3.5 5.2 6.0 6.1 7.0+PTX" TORCH_NVCC_FLAGS="-Xfatbin -compress-all" \
-    CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" \
-    pip install -v .
-
-WORKDIR /opt/pytorch
-RUN git clone https://github.com/pytorch/vision.git && cd vision && pip install -v .
-
-```
-
-On top of the above image, add files and install packages to access HDFS:
-```shell
-RUN apt-get update && apt-get install -y openjdk-8-jdk wget
-# Install hadoop
-ENV HADOOP_VERSION="2.9.2"
-RUN wget https://archive.apache.org/dist/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}.tar.gz
-# If you are in mainland China, you can use a mirror instead, for example:
-# RUN wget http://mirrors.hust.edu.cn/apache/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}.tar.gz
-RUN tar zxf hadoop-${HADOOP_VERSION}.tar.gz
-RUN ln -s hadoop-${HADOOP_VERSION} hadoop-current
-RUN rm hadoop-${HADOOP_VERSION}.tar.gz
-```
-
-Build and push to your own docker registry: Use ```docker build ... ``` and ```docker push ...``` to finish this step.
-
-## Use examples to build your own PyTorch docker images
-
-We provide some example Dockerfiles for you to build your own PyTorch docker images.
-
-For latest PyTorch
-
-- *docker/pytorch/base/ubuntu-18.04/Dockerfile.gpu.pytorch_latest*: Latest PyTorch with GPU support, prebuilt for CUDA 10.
-- *docker/pytorch/with-cifar10-models/ubuntu-18.04/Dockerfile.gpu.pytorch_latest*: Latest PyTorch with GPU support, prebuilt for CUDA 10, including the cifar10 models.
-
-## Build Docker images
-
-### Manually build Docker image:
-
-Under the `docker/pytorch` directory, run `build-all.sh` to build all Docker images. This command will build the following Docker images:
-
-- `pytorch-latest-gpu-base:0.0.1` for base Docker image which includes Hadoop, PyTorch, GPU base libraries.
-- `pytorch-latest-gpu:0.0.1` which includes the cifar10 model as well
-
-### Use prebuilt images
-
-(No liability)
-You can also use prebuilt images for convenience:
-
-- hadoopsubmarine/pytorch-latest-gpu-base:0.0.1
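-
-For example, you might pull the base image directly from Docker Hub (assuming the tag above is still published):
-```shell
-docker pull hadoopsubmarine/pytorch-latest-gpu-base:0.0.1
-```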
diff --git a/website/docs/userDocs/yarn/WriteDockerfileTF.md b/website/docs/userDocs/yarn/WriteDockerfileTF.md
deleted file mode 100644
index 3cb1d18..0000000
--- a/website/docs/userDocs/yarn/WriteDockerfileTF.md
+++ /dev/null
@@ -1,123 +0,0 @@
----
-title: Docker Images for TensorFlow
----
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-   http://www.apache.org/licenses/LICENSE-2.0
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-## How to create docker images to run Tensorflow on YARN
-
-Dockerfile to run TensorFlow on YARN needs two parts:
-
-**Base libraries which Tensorflow depends on**
-
-1) OS base image, for example ```ubuntu:18.04```
-
-2) TensorFlow dependent libraries and packages. For example ```python```, ```scipy```. For GPU support, you also need ```cuda```, ```cudnn```, etc.
-
-3) Tensorflow package.
-
-**Libraries to access HDFS**
-
-1) JDK
-
-2) Hadoop
-
-Here's an example of a base image (without GPU support) to install TensorFlow:
-```shell
-FROM ubuntu:18.04
-
-# Pick up some TF dependencies
-RUN apt-get update && apt-get install -y --no-install-recommends \
-        build-essential \
-        curl \
-        libfreetype6-dev \
-        libpng-dev \
-        libzmq3-dev \
-        pkg-config \
-        python \
-        python-dev \
-        rsync \
-        software-properties-common \
-        unzip \
-        && \
-    apt-get clean && \
-    rm -rf /var/lib/apt/lists/*
-
-RUN export DEBIAN_FRONTEND=noninteractive && apt-get update && apt-get install -yq krb5-user libpam-krb5 && apt-get clean
-
-RUN curl -O https://bootstrap.pypa.io/get-pip.py && \
-    python get-pip.py && \
-    rm get-pip.py
-
-RUN pip --no-cache-dir install \
-        Pillow \
-        h5py \
-        ipykernel \
-        jupyter \
-        matplotlib \
-        numpy \
-        pandas \
-        scipy \
-        sklearn \
-        && \
-    python -m ipykernel.kernelspec
-
-RUN pip --no-cache-dir install \
-    http://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.13.1-cp27-none-linux_x86_64.whl
-```
-
-On top of the above image, add files and install packages to access HDFS:
-```shell
-RUN apt-get update && apt-get install -y openjdk-8-jdk wget
-# Install hadoop
-ENV HADOOP_VERSION="2.9.2"
-RUN wget https://archive.apache.org/dist/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}.tar.gz
-# If you are in mainland China, you can use a mirror instead, for example:
-# RUN wget http://mirrors.hust.edu.cn/apache/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}.tar.gz
-RUN tar zxf hadoop-${HADOOP_VERSION}.tar.gz
-RUN ln -s hadoop-${HADOOP_VERSION} hadoop-current
-RUN rm hadoop-${HADOOP_VERSION}.tar.gz
-```
-
-Build and push to your own docker registry: Use ```docker build ... ``` and ```docker push ...``` to finish this step.
-
-## Use examples to build your own Tensorflow docker images
-
-We provide the following examples for you to build your own TensorFlow docker images.
-
-For TensorFlow 1.13.1 (GPU images precompiled for CUDA 10.x)
-
-- *docker/tensorflow/base/ubuntu-18.04/Dockerfile.cpu.tf_1.13.1*: TensorFlow 1.13.1 with CPU support only.
-- *docker/tensorflow/with-cifar10-models/ubuntu-18.04/Dockerfile.cpu.tf_1.13.1*: TensorFlow 1.13.1 with CPU support only, including the cifar10 models.
-- *docker/tensorflow/base/ubuntu-18.04/Dockerfile.gpu.tf_1.13.1*: TensorFlow 1.13.1 with GPU support, prebuilt for CUDA 10.
-- *docker/tensorflow/with-cifar10-models/ubuntu-18.04/Dockerfile.gpu.tf_1.13.1*: TensorFlow 1.13.1 with GPU support, prebuilt for CUDA 10, including the cifar10 models.
-
-## Build Docker images
-
-### Manually build Docker image:
-
-Under the `docker/` directory, run `build-all.sh` to build the Docker images. It will build the following images:
-
-- `tf-1.13.1-gpu-base:0.0.1` for the base Docker image which includes Hadoop, TensorFlow, and GPU base libraries.
-- `tf-1.13.1-cpu-base:0.0.1` for the base Docker image which includes Hadoop and TensorFlow.
-- `tf-1.13.1-gpu:0.0.1` which includes the cifar10 model.
-- `tf-1.13.1-cpu:0.0.1` which includes the cifar10 model (CPU only).
-
-### Use prebuilt images
-
-(No liability)
-You can also use prebuilt images for convenience:
-
-- hadoopsubmarine/tf-1.13.1-gpu:0.0.1
-- hadoopsubmarine/tf-1.13.1-cpu:0.0.1
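-
-A minimal sketch of layering your own training code on top of a prebuilt image (the script name below is a placeholder for your own code):
-```shell
-FROM hadoopsubmarine/tf-1.13.1-cpu:0.0.1
-# Add your training script on top of the prebuilt base
-RUN mkdir -p /test
-ADD my_train.py /test/my_train.py
-```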
diff --git a/website/docs/userDocs/yarn/YARNRuntimeGuide.md b/website/docs/userDocs/yarn/YARNRuntimeGuide.md
deleted file mode 100644
index bd320cf..0000000
--- a/website/docs/userDocs/yarn/YARNRuntimeGuide.md
+++ /dev/null
@@ -1,312 +0,0 @@
----
-title: YARN Runtime Quick Start Guide
----
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-   http://www.apache.org/licenses/LICENSE-2.0
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-## Prerequisite
-
-Check out [Running Submarine on YARN](../../adminDocs/yarn/README) first.
-
-## Build your own Docker image
-
-If you want to build your own Docker image for TensorFlow/PyTorch/MXNet while following the documents below, please check out [Build your Docker image](Dockerfiles) for more details.
-
-## Launch TensorFlow Application:
-
-### Without Docker
-
-You need:
-
-* Build a Python virtual environment with TensorFlow 1.13.1 installed
-* A cluster with Hadoop 2.9 or above.
-
-### Building a Python virtual environment with TensorFlow
-
-TonY requires a Python virtual environment zip with TensorFlow and any needed Python libraries already installed.
-
-```
-wget https://files.pythonhosted.org/packages/33/bc/fa0b5347139cd9564f0d44ebd2b147ac97c36b2403943dbee8a25fd74012/virtualenv-16.0.0.tar.gz
-tar xf virtualenv-16.0.0.tar.gz
-
-# Make sure to install using Python 3, as TensorFlow only provides Python 3 artifacts
-python virtualenv-16.0.0/virtualenv.py venv
-. venv/bin/activate
-pip install tensorflow==1.13.1
-zip -r myvenv.zip venv
-deactivate
-```
-
-The above commands produce a myvenv.zip, which is used in the example below. There is no need to copy it to other nodes, and it is not needed when using Docker to run the job.
-
-
-**Note:** If you require a version of TensorFlow and TensorBoard prior to `1.13.1`, take a look at [this](https://github.com/linkedin/TonY/issues/42) issue.
-
-
-### Get the training examples
-
-Get mnist_distributed.py from https://github.com/linkedin/TonY/tree/master/tony-examples/mnist-tensorflow
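-
-For example, you can fetch it directly with wget (the raw URL below is derived from the repository link above; verify it against the current repository layout):
-
-```
-wget https://raw.githubusercontent.com/linkedin/TonY/master/tony-examples/mnist-tensorflow/mnist_distributed.py
-```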
-
-
-```
-SUBMARINE_VERSION=<REPLACE_VERSION>
-SUBMARINE_HADOOP_VERSION=3.1
-CLASSPATH=$(hadoop classpath --glob):path-to/submarine-all-${SUBMARINE_VERSION}-hadoop-${SUBMARINE_HADOOP_VERSION}.jar \
-java org.apache.submarine.client.cli.Cli job run --name tf-job-001 \
- --framework tensorflow \
- --verbose \
- --input_path "" \
- --num_workers 2 \
- --worker_resources memory=1G,vcores=1 \
- --num_ps 1 \
- --ps_resources memory=1G,vcores=1 \
- --worker_launch_cmd "myvenv.zip/venv/bin/python mnist_distributed.py --steps 2 --data_dir /tmp/data --working_dir /tmp/mode" \
- --ps_launch_cmd "myvenv.zip/venv/bin/python mnist_distributed.py --steps 2 --data_dir /tmp/data --working_dir /tmp/mode" \
- --insecure \
- --conf tony.containers.resources=path-to/myvenv.zip#archive,path-to/mnist_distributed.py,path-to/submarine-all-${SUBMARINE_VERSION}-hadoop-${SUBMARINE_HADOOP_VERSION}.jar
-```
-You should then be able to see links and status of the jobs from the command line:
-
-```
-2019-04-22 20:30:42,611 INFO tony.TonyClient: Tasks Status Updated: [TaskInfo] name: worker index: 0 url: http://pi-aw:8042/node/containerlogs/container_1555916523933_0030_01_000003/pi status: RUNNING
-2019-04-22 20:30:42,612 INFO tony.TonyClient: Tasks Status Updated: [TaskInfo] name: worker index: 1 url: http://pi-aw:8042/node/containerlogs/container_1555916523933_0030_01_000004/pi status: RUNNING
-2019-04-22 20:30:42,612 INFO tony.TonyClient: Tasks Status Updated: [TaskInfo] name: ps index: 0 url: http://pi-aw:8042/node/containerlogs/container_1555916523933_0030_01_000002/pi status: RUNNING
-2019-04-22 20:30:42,612 INFO tony.TonyClient: Logs for ps 0 at: http://pi-aw:8042/node/containerlogs/container_1555916523933_0030_01_000002/pi
-2019-04-22 20:30:42,612 INFO tony.TonyClient: Logs for worker 0 at: http://pi-aw:8042/node/containerlogs/container_1555916523933_0030_01_000003/pi
-2019-04-22 20:30:42,612 INFO tony.TonyClient: Logs for worker 1 at: http://pi-aw:8042/node/containerlogs/container_1555916523933_0030_01_000004/pi
-2019-04-22 20:30:44,625 INFO tony.TonyClient: Tasks Status Updated: [TaskInfo] name: ps index: 0 url: http://pi-aw:8042/node/containerlogs/container_1555916523933_0030_01_000002/pi status: FINISHED
-2019-04-22 20:30:44,625 INFO tony.TonyClient: Tasks Status Updated: [TaskInfo] name: worker index: 0 url: http://pi-aw:8042/node/containerlogs/container_1555916523933_0030_01_000003/pi status: FINISHED
-2019-04-22 20:30:44,626 INFO tony.TonyClient: Tasks Status Updated: [TaskInfo] name: worker index: 1 url: http://pi-aw:8042/node/containerlogs/container_1555916523933_0030_01_000004/pi status: FINISHED
-
-```
-
-### With Docker
-
-```
-SUBMARINE_VERSION=<REPLACE_VERSION>
-SUBMARINE_HADOOP_VERSION=3.1
-CLASSPATH=$(hadoop classpath --glob):path-to/submarine-all-${SUBMARINE_VERSION}-hadoop-${SUBMARINE_HADOOP_VERSION}.jar \
-java org.apache.submarine.client.cli.Cli job run --name tf-job-001 \
- --framework tensorflow \
- --docker_image hadoopsubmarine/tf-1.8.0-cpu:0.0.1 \
- --input_path hdfs://pi-aw:9000/dataset/cifar-10-data \
- --worker_resources memory=3G,vcores=2 \
- --worker_launch_cmd "export CLASSPATH=\$(/hadoop-3.1.0/bin/hadoop classpath --glob) && cd /test/models/tutorials/image/cifar10_estimator && python cifar10_main.py --data-dir=%input_path% --job-dir=%checkpoint_path% --train-steps=10000 --eval-batch-size=16 --train-batch-size=16 --variable-strategy=CPU --num-gpus=0 --sync" \
- --env JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64 \
- --env DOCKER_HADOOP_HDFS_HOME=/hadoop-3.1.0 \
- --env DOCKER_JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64 \
- --env HADOOP_HOME=/hadoop-3.1.0 \
- --env HADOOP_YARN_HOME=/hadoop-3.1.0 \
- --env HADOOP_COMMON_HOME=/hadoop-3.1.0 \
- --env HADOOP_HDFS_HOME=/hadoop-3.1.0 \
- --env HADOOP_CONF_DIR=/hadoop-3.1.0/etc/hadoop \
- --conf tony.containers.resources=path-to/submarine-all-${SUBMARINE_VERSION}-hadoop-${SUBMARINE_HADOOP_VERSION}.jar
-```
-#### Notes:
-1) `DOCKER_JAVA_HOME` points to JAVA_HOME inside Docker image.
-
-2) `DOCKER_HADOOP_HDFS_HOME` points to HADOOP_HDFS_HOME inside Docker image.
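-
-A quick way to check that both paths exist inside your image before submitting a job (the image name is a placeholder; the paths match the example above):
-
-```
-docker run --rm <your-docker-image> ls /usr/lib/jvm/java-8-openjdk-amd64 /hadoop-3.1.0/bin
-```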
-
-We removed the TonY submodule after applying [SUBMARINE-371](https://issues.apache.org/jira/browse/SUBMARINE-371) and now use the TonY dependency directly.
-
-After Submarine v0.2.0, there is an uber jar `submarine-all-${SUBMARINE_VERSION}-hadoop-${HADOOP_VERSION}.jar` released together with
-the `submarine-core-${SUBMARINE_VERSION}.jar`, `submarine-yarnservice-runtime-${SUBMARINE_VERSION}.jar` and `submarine-tony-runtime-${SUBMARINE_VERSION}.jar`.
-<br />
-
-## Launch PyTorch Application:
-
-### Without Docker
-
-You need:
-
-* Build a Python virtual environment with PyTorch 0.4.0+ installed
-* A cluster with Hadoop 2.9 or above.
-
-### Building a Python virtual environment with PyTorch
-
-TonY requires a Python virtual environment zip with PyTorch and any needed Python libraries already installed.
-
-```
-wget https://files.pythonhosted.org/packages/33/bc/fa0b5347139cd9564f0d44ebd2b147ac97c36b2403943dbee8a25fd74012/virtualenv-16.0.0.tar.gz
-tar xf virtualenv-16.0.0.tar.gz
-
-python virtualenv-16.0.0/virtualenv.py venv
-. venv/bin/activate
-# PyTorch is published on PyPI as "torch", not "pytorch"
-pip install torch==0.4.0
-zip -r myvenv.zip venv
-deactivate
-```
-
-
-### Get the training examples
-
-Get mnist_distributed.py from https://github.com/linkedin/TonY/tree/master/tony-examples/mnist-pytorch
-
-
-```
-SUBMARINE_VERSION=<REPLACE_VERSION>
-SUBMARINE_HADOOP_VERSION=3.1
-CLASSPATH=$(hadoop classpath --glob):path-to/submarine-all-${SUBMARINE_VERSION}-hadoop-${SUBMARINE_HADOOP_VERSION}.jar \
-java org.apache.submarine.client.cli.Cli job run --name PyTorch-job-001 \
 --framework pytorch \
- --num_workers 2 \
- --worker_resources memory=3G,vcores=2 \
- --num_ps 2 \
- --ps_resources memory=3G,vcores=2 \
- --worker_launch_cmd "myvenv.zip/venv/bin/python mnist_distributed.py" \
- --ps_launch_cmd "myvenv.zip/venv/bin/python mnist_distributed.py" \
- --insecure \
 --conf tony.containers.resources=path-to/myvenv.zip#archive,path-to/mnist_distributed.py,\
-path-to/submarine-all-${SUBMARINE_VERSION}-hadoop-${SUBMARINE_HADOOP_VERSION}.jar
-```
-You should then be able to see links and status of the jobs from the command line:
-
-```
-2019-04-22 20:30:42,611 INFO tony.TonyClient: Tasks Status Updated: [TaskInfo] name: worker index: 0 url: http://pi-aw:8042/node/containerlogs/container_1555916523933_0030_01_000003/pi status: RUNNING
-2019-04-22 20:30:42,612 INFO tony.TonyClient: Tasks Status Updated: [TaskInfo] name: worker index: 1 url: http://pi-aw:8042/node/containerlogs/container_1555916523933_0030_01_000004/pi status: RUNNING
-2019-04-22 20:30:42,612 INFO tony.TonyClient: Tasks Status Updated: [TaskInfo] name: ps index: 0 url: http://pi-aw:8042/node/containerlogs/container_1555916523933_0030_01_000002/pi status: RUNNING
-2019-04-22 20:30:42,612 INFO tony.TonyClient: Logs for ps 0 at: http://pi-aw:8042/node/containerlogs/container_1555916523933_0030_01_000002/pi
-2019-04-22 20:30:42,612 INFO tony.TonyClient: Logs for worker 0 at: http://pi-aw:8042/node/containerlogs/container_1555916523933_0030_01_000003/pi
-2019-04-22 20:30:42,612 INFO tony.TonyClient: Logs for worker 1 at: http://pi-aw:8042/node/containerlogs/container_1555916523933_0030_01_000004/pi
-2019-04-22 20:30:44,625 INFO tony.TonyClient: Tasks Status Updated: [TaskInfo] name: ps index: 0 url: http://pi-aw:8042/node/containerlogs/container_1555916523933_0030_01_000002/pi status: FINISHED
-2019-04-22 20:30:44,625 INFO tony.TonyClient: Tasks Status Updated: [TaskInfo] name: worker index: 0 url: http://pi-aw:8042/node/containerlogs/container_1555916523933_0030_01_000003/pi status: FINISHED
-2019-04-22 20:30:44,626 INFO tony.TonyClient: Tasks Status Updated: [TaskInfo] name: worker index: 1 url: http://pi-aw:8042/node/containerlogs/container_1555916523933_0030_01_000004/pi status: FINISHED
-
-```
-
-### With Docker
-
-```
-SUBMARINE_VERSION=<REPLACE_VERSION>
-SUBMARINE_HADOOP_VERSION=3.1
-CLASSPATH=$(hadoop classpath --glob):path-to/submarine-all-${SUBMARINE_VERSION}-hadoop-${SUBMARINE_HADOOP_VERSION}.jar \
-java org.apache.submarine.client.cli.Cli job run --name PyTorch-job-001 \
 --framework pytorch \
- --docker_image pytorch-latest-gpu:0.0.1 \
- --input_path "" \
- --num_workers 1 \
- --worker_resources memory=3G,vcores=2 \
- --worker_launch_cmd "cd /test/ && python cifar10_tutorial.py" \
- --env JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64 \
- --env DOCKER_HADOOP_HDFS_HOME=/hadoop-3.1.2 \
- --env DOCKER_JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64 \
- --env HADOOP_HOME=/hadoop-3.1.2 \
- --env HADOOP_YARN_HOME=/hadoop-3.1.2 \
- --env HADOOP_COMMON_HOME=/hadoop-3.1.2 \
- --env HADOOP_HDFS_HOME=/hadoop-3.1.2 \
- --env HADOOP_CONF_DIR=/hadoop-3.1.2/etc/hadoop \
- --conf tony.containers.resources=path-to/submarine-all-${SUBMARINE_VERSION}-hadoop-${SUBMARINE_HADOOP_VERSION}.jar
-```
-
-## Launch MXNet Application:
-
-### Without Docker
-
-You need:
-
-* Build a Python virtual environment with MXNet installed
-* A cluster with Hadoop 2.9 or above.
-
-### Building a Python virtual environment with MXNet
-
-TonY requires a Python virtual environment zip with MXNet and any needed Python libraries already installed.
-
-```
-wget https://files.pythonhosted.org/packages/33/bc/fa0b5347139cd9564f0d44ebd2b147ac97c36b2403943dbee8a25fd74012/virtualenv-16.0.0.tar.gz
-tar xf virtualenv-16.0.0.tar.gz
-
-python virtualenv-16.0.0/virtualenv.py venv
-. venv/bin/activate
-pip install mxnet==1.5.1
-zip -r myvenv.zip venv
-deactivate
-```
-
-
-### Get the training examples
-
-Get image_classification.py from this [link](https://github.com/apache/submarine/blob/master/dev-support/mini-submarine/submarine/image_classification.py)
-
-
-```
-SUBMARINE_VERSION=<REPLACE_VERSION>
-SUBMARINE_HADOOP_VERSION=3.1
-CLASSPATH=$(hadoop classpath --glob):path-to/submarine-all-${SUBMARINE_VERSION}-hadoop-${SUBMARINE_HADOOP_VERSION}.jar \
-java org.apache.submarine.client.cli.Cli job run --name MXNet-job-001 \
 --framework mxnet \
- --input_path "" \
- --num_workers 2 \
- --worker_resources memory=3G,vcores=2 \
- --worker_launch_cmd "myvenv.zip/venv/bin/python image_classification.py --dataset cifar10 --model vgg11 --epochs 1 --kvstore dist_sync" \
- --num_ps 2 \
- --ps_resources memory=3G,vcores=2 \
- --ps_launch_cmd "myvenv.zip/venv/bin/python image_classification.py --dataset cifar10 --model vgg11 --epochs 1 --kvstore dist_sync" \
- --num_schedulers=1 \
- --scheduler_resources memory=1G,vcores=1 \
- --scheduler_launch_cmd="myvenv.zip/venv/bin/python image_classification.py --dataset cifar10 --model vgg11 --epochs 1 --kvstore dist_sync" \
- --insecure \
 --conf tony.containers.resources=path-to/myvenv.zip#archive,path-to/image_classification.py,\
-path-to/submarine-all-${SUBMARINE_VERSION}-hadoop-${SUBMARINE_HADOOP_VERSION}.jar
-```
-You should then be able to see links and status of the jobs from the command line:
-
-```
-2020-04-16 20:23:43,834 INFO tony.TonyClient: Task status updated: [TaskInfo] name: server, index: 1, url: http://pi-aw:8042/node/containerlogs/container_1587037749540_0005_01_000004/pi status: RUNNING
-2020-04-16 20:23:43,834 INFO tony.TonyClient: Task status updated: [TaskInfo] name: server, index: 0, url: http://pi-aw:8042/node/containerlogs/container_1587037749540_0005_01_000003/pi status: RUNNING
-2020-04-16 20:23:43,834 INFO tony.TonyClient: Task status updated: [TaskInfo] name: worker, index: 1, url: http://pi-aw:8042/node/containerlogs/container_1587037749540_0005_01_000006/pi status: RUNNING
-2020-04-16 20:23:43,834 INFO tony.TonyClient: Task status updated: [TaskInfo] name: worker, index: 0, url: http://pi-aw:8042/node/containerlogs/container_1587037749540_0005_01_000005/pi status: RUNNING
-2020-04-16 20:23:43,834 INFO tony.TonyClient: Task status updated: [TaskInfo] name: scheduler, index: 0, url: http://pi-aw:8042/node/containerlogs/container_1587037749540_0005_01_000002/pi status: RUNNING
-2020-04-16 20:23:43,839 INFO tony.TonyClient: Logs for scheduler 0 at: http://pi-aw:8042/node/containerlogs/container_1587037749540_0005_01_000002/pi
-2020-04-16 20:23:43,839 INFO tony.TonyClient: Logs for server 0 at: http://pi-aw:8042/node/containerlogs/container_1587037749540_0005_01_000003/pi
-2020-04-16 20:23:43,840 INFO tony.TonyClient: Logs for server 1 at: http://pi-aw:8042/node/containerlogs/container_1587037749540_0005_01_000004/pi
-2020-04-16 20:23:43,840 INFO tony.TonyClient: Logs for worker 0 at: http://pi-aw:8042/node/containerlogs/container_1587037749540_0005_01_000005/pi
-2020-04-16 20:23:43,840 INFO tony.TonyClient: Logs for worker 1 at: http://pi-aw:8042/node/containerlogs/container_1587037749540_0005_01_000006/pi
-2020-04-16 21:02:09,723 INFO tony.TonyClient: Task status updated: [TaskInfo] name: scheduler, index: 0, url: http://pi-aw:8042/node/containerlogs/container_1587037749540_0005_01_000002/pi status: SUCCEEDED
-2020-04-16 21:02:09,736 INFO tony.TonyClient: Task status updated: [TaskInfo] name: worker, index: 0, url: http://pi-aw:8042/node/containerlogs/container_1587037749540_0005_01_000005/pi status: SUCCEEDED
-2020-04-16 21:02:09,737 INFO tony.TonyClient: Task status updated: [TaskInfo] name: server, index: 1, url: http://pi-aw:8042/node/containerlogs/container_1587037749540_0005_01_000004/pi status: SUCCEEDED
-2020-04-16 21:02:09,737 INFO tony.TonyClient: Task status updated: [TaskInfo] name: worker, index: 1, url: http://pi-aw:8042/node/containerlogs/container_1587037749540_0005_01_000006/pi status: SUCCEEDED
-2020-04-16 21:02:09,737 INFO tony.TonyClient: Task status updated: [TaskInfo] name: server, index: 0, url: http://pi-aw:8042/node/containerlogs/container_1587037749540_0005_01_000003/pi status: SUCCEEDED
-```
-
-### With Docker
-You can refer to this [sample Dockerfile](docker/mxnet/cifar10/Dockerfile.cifar10.mx_1.5.1) when building your own Docker image.
-```
-SUBMARINE_VERSION=<REPLACE_VERSION>
-SUBMARINE_HADOOP_VERSION=3.1
-CLASSPATH=$(hadoop classpath --glob):path-to/submarine-all-${SUBMARINE_VERSION}-hadoop-${SUBMARINE_HADOOP_VERSION}.jar \
-java org.apache.submarine.client.cli.Cli job run --name MXNet-job-001 \
 --framework mxnet \
- --docker_image <your_docker_image> \
- --input_path "" \
- --num_schedulers 1 \
- --scheduler_resources memory=1G,vcores=1 \
- --scheduler_launch_cmd "/usr/bin/python image_classification.py --dataset cifar10 --model vgg11 --epochs 1 --kvstore dist_sync" \
- --num_workers 2 \
- --worker_resources memory=2G,vcores=1 \
- --worker_launch_cmd "/usr/bin/python image_classification.py --dataset cifar10 --model vgg11 --epochs 1 --kvstore dist_sync" \
- --num_ps 2 \
- --ps_resources memory=2G,vcores=1 \
- --ps_launch_cmd "/usr/bin/python image_classification.py --dataset cifar10 --model vgg11 --epochs 1 --kvstore dist_sync" \
- --verbose \
- --insecure \
- --conf tony.containers.resources=path-to/image_classification.py,path-to/submarine-all-${SUBMARINE_VERSION}-hadoop-${SUBMARINE_HADOOP_VERSION}.jar
-```
-
-## Use YARN Service to run Submarine: Deprecated
-
-Historically, Submarine supported using [YARN Service](https://hadoop.apache.org/docs/r3.1.0/hadoop-yarn/hadoop-yarn-site/yarn-service/Overview.html) to submit deep learning jobs. We have stopped supporting it because YARN Service is not actively developed by the community, and extra dependencies such as RegistryDNS/ATS-v2 cause many setup issues.
-
-As of now, you can still use YARN Service to run Submarine, but the code will be removed in a future release. Only TonY will be supported for running Submarine on YARN.
diff --git a/website/docs/userDocs/yarn/docker/mxnet/base/ubuntu-18.04/Dockerfile.cpu.mx_latest b/website/docs/userDocs/yarn/docker/mxnet/base/ubuntu-18.04/Dockerfile.cpu.mx_latest
deleted file mode 100644
index d1f062d..0000000
--- a/website/docs/userDocs/yarn/docker/mxnet/base/ubuntu-18.04/Dockerfile.cpu.mx_latest
+++ /dev/null
@@ -1,49 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM ubuntu:18.04
-
-# Install some development tools and packages
-# MXNet 1.6 is going to be the last MXNet release to support Python2
-RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y tzdata git \
-    wget zip python3 python3-pip python3-distutils libgomp1 libopenblas-dev libopencv-dev
-
-# Install latest MXNet using pip
-RUN pip3 install mxnet
-
-# Install JDK
-ENV JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
-RUN echo "$LOG_TAG Install java8" && \
-    apt-get update && \
-    apt-get install -y --no-install-recommends openjdk-8-jdk && \
-    apt-get clean && rm -rf /var/lib/apt/lists/*
-
-# Install Hadoop
-WORKDIR /
-RUN echo "Install Hadoop 3.1.2"
-ENV HADOOP_VERSION="3.1.2"
-RUN wget https://archive.apache.org/dist/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}.tar.gz
-RUN tar zxf hadoop-${HADOOP_VERSION}.tar.gz
-RUN ln -s hadoop-${HADOOP_VERSION} hadoop-current
-RUN rm hadoop-${HADOOP_VERSION}.tar.gz
-
-RUN echo "Install python related packages" && \
-    pip3 install --user graphviz==0.8.4 ipykernel jupyter matplotlib numpy pandas scipy sklearn  && \
-    python3 -m ipykernel.kernelspec
-
-# Set the locale to fix bash warning: setlocale: LC_ALL: cannot change locale (en_US.UTF-8)
-RUN apt-get update && apt-get install -y --no-install-recommends locales && \
-    apt-get clean && rm -rf /var/lib/apt/lists/*
-RUN locale-gen en_US.UTF-8
diff --git a/website/docs/userDocs/yarn/docker/mxnet/base/ubuntu-18.04/Dockerfile.gpu.mx_latest b/website/docs/userDocs/yarn/docker/mxnet/base/ubuntu-18.04/Dockerfile.gpu.mx_latest
deleted file mode 100644
index ecfc752..0000000
--- a/website/docs/userDocs/yarn/docker/mxnet/base/ubuntu-18.04/Dockerfile.gpu.mx_latest
+++ /dev/null
@@ -1,49 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM nvidia/cuda:10.0-cudnn7-devel-ubuntu18.04
-
-# Install some development tools and packages
-# MXNet 1.6 is going to be the last MXNet release to support Python2
-RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y tzdata git \
-    wget zip python3 python3-pip python3-distutils libgomp1 libopenblas-dev libopencv-dev
-
-# Install latest MXNet with CUDA-10.0 using pip
-RUN pip3 install mxnet-cu100
-
-# Install JDK
-ENV JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
-RUN echo "$LOG_TAG Install java8" && \
-    apt-get update && \
-    apt-get install -y --no-install-recommends openjdk-8-jdk && \
-    apt-get clean && rm -rf /var/lib/apt/lists/*
-
-# Install Hadoop
-WORKDIR /
-RUN echo "Install Hadoop 3.1.2"
-ENV HADOOP_VERSION="3.1.2"
-RUN wget https://archive.apache.org/dist/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}.tar.gz
-RUN tar zxf hadoop-${HADOOP_VERSION}.tar.gz
-RUN ln -s hadoop-${HADOOP_VERSION} hadoop-current
-RUN rm hadoop-${HADOOP_VERSION}.tar.gz
-
-RUN echo "Install python related packages" && \
-    pip3 install --user graphviz==0.8.4 ipykernel jupyter matplotlib numpy pandas scipy sklearn  && \
-    python3 -m ipykernel.kernelspec
-
-# Set the locale to fix bash warning: setlocale: LC_ALL: cannot change locale (en_US.UTF-8)
-RUN apt-get update && apt-get install -y --no-install-recommends locales && \
-    apt-get clean && rm -rf /var/lib/apt/lists/*
-RUN locale-gen en_US.UTF-8
diff --git a/website/docs/userDocs/yarn/docker/mxnet/build-all.sh b/website/docs/userDocs/yarn/docker/mxnet/build-all.sh
deleted file mode 100755
index 5aab922..0000000
--- a/website/docs/userDocs/yarn/docker/mxnet/build-all.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-echo "Building base images"
-
-set -euo pipefail
-
-cd base/ubuntu-18.04
-
-docker build . -f Dockerfile.cpu.mx_latest -t mxnet-latest-cpu-base:0.0.1
-docker build . -f Dockerfile.gpu.mx_latest -t mxnet-latest-gpu-base:0.0.1
-echo "Finished building base images"
diff --git a/website/docs/userDocs/yarn/docker/mxnet/cifar10/Dockerfile.cifar10.mx_1.5.1 b/website/docs/userDocs/yarn/docker/mxnet/cifar10/Dockerfile.cifar10.mx_1.5.1
deleted file mode 100644
index 313b0f8..0000000
--- a/website/docs/userDocs/yarn/docker/mxnet/cifar10/Dockerfile.cifar10.mx_1.5.1
+++ /dev/null
@@ -1,62 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM ubuntu:18.04
-
-RUN apt-get update && apt-get install -y git wget zip python3 python3-pip \
-    python3-distutils openjdk-8-jdk libgomp1 apt-transport-https ca-certificates curl \
-    gnupg-agent software-properties-common
-
-RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
-RUN add-apt-repository \
-   "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
-   $(lsb_release -cs) \
-   stable"
-
-ENV JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
-RUN ln -s /usr/bin/python3 /usr/bin/python
-
-# Install MXNet
-RUN pip3 install "mxnet==1.5.1"
-
-# Install hadoop 3.1.0+ supported YARN service
-ENV HADOOP_VERSION="3.1.2"
-RUN wget https://archive.apache.org/dist/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}.tar.gz
-# If you are in mainland China, you can use the following command.
-# RUN wget http://mirrors.shu.edu.cn/apache/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}.tar.gz
-
-RUN tar -xvf hadoop-${HADOOP_VERSION}.tar.gz -C /opt/
-RUN rm hadoop-${HADOOP_VERSION}.tar.gz
-
-# Copy the $HADOOP_CONF_DIR folder as "hadoop" folder in the same dir as dockerfile .
-# ├── Dockerfile.cifar10.mx_1.5.1
-# └── hadoop
-#     ├── capacity-scheduler.xml
-#     ├── configuration.xsl
-#     ...
-COPY hadoop /opt/hadoop-$HADOOP_VERSION/etc/hadoop
-
-# Config Hadoop env
-ENV HADOOP_HOME=/opt/hadoop-$HADOOP_VERSION/
-ENV HADOOP_YARN_HOME=/opt/hadoop-$HADOOP_VERSION/
-ENV HADOOP_HDFS_HOME=/opt/hadoop-$HADOOP_VERSION/
-ENV HADOOP_CONF_DIR=/opt/hadoop-$HADOOP_VERSION/etc/hadoop
-ENV HADOOP_COMMON_HOME=/opt/hadoop-$HADOOP_VERSION
-ENV HADOOP_MAPRED_HOME=/opt/hadoop-$HADOOP_VERSION
-
-# Create a user, make sure the user groups are the same as your host
-# and the container user's UID is same as your host's.
-RUN groupadd -g 5000 hadoop
-RUN useradd -u 1000 -g hadoop pi
diff --git a/website/docs/userDocs/yarn/docker/pytorch/base/ubuntu-18.04/Dockerfile.gpu.pytorch_latest b/website/docs/userDocs/yarn/docker/pytorch/base/ubuntu-18.04/Dockerfile.gpu.pytorch_latest
deleted file mode 100644
index 8404b42..0000000
--- a/website/docs/userDocs/yarn/docker/pytorch/base/ubuntu-18.04/Dockerfile.gpu.pytorch_latest
+++ /dev/null
@@ -1,77 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM nvidia/cuda:10.0-cudnn7-devel-ubuntu18.04
-ARG PYTHON_VERSION=3.6
-RUN apt-get update && apt-get install -y --no-install-recommends \
-         build-essential \
-         cmake \
-         git \
-         curl \
-         vim \
-         ca-certificates \
-         libjpeg-dev \
-         libpng-dev \
-         wget &&\
-     rm -rf /var/lib/apt/lists/*
-
-
-RUN curl -o ~/miniconda.sh -O  https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh  && \
-     chmod +x ~/miniconda.sh && \
-     ~/miniconda.sh -b -p /opt/conda && \
-     rm ~/miniconda.sh && \
-     /opt/conda/bin/conda install -y python=$PYTHON_VERSION numpy pyyaml scipy ipython mkl mkl-include cython typing && \
-     /opt/conda/bin/conda install -y -c pytorch magma-cuda100 && \
-     /opt/conda/bin/conda clean -ya
-ENV PATH /opt/conda/bin:$PATH
-RUN pip install ninja
-# This must be done before pip so that requirements.txt is available
-WORKDIR /opt/pytorch
-RUN git clone https://github.com/pytorch/pytorch.git
-WORKDIR pytorch
-RUN git submodule update --init
-RUN TORCH_CUDA_ARCH_LIST="3.5 5.2 6.0 6.1 7.0+PTX" TORCH_NVCC_FLAGS="-Xfatbin -compress-all" \
-    CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" \
-    pip install -v .
-
-WORKDIR /opt/pytorch
-RUN git clone https://github.com/pytorch/vision.git && cd vision && pip install -v .
-
-WORKDIR /
-# Install Hadoop
-ENV HADOOP_VERSION="3.1.2"
-RUN wget https://archive.apache.org/dist/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}.tar.gz
-RUN tar zxf hadoop-${HADOOP_VERSION}.tar.gz
-RUN ln -s hadoop-${HADOOP_VERSION} hadoop-current
-RUN rm hadoop-${HADOOP_VERSION}.tar.gz
-
-ENV JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
-RUN echo "$LOG_TAG Install java8" && \
-    apt-get update && \
-    apt-get install -y --no-install-recommends openjdk-8-jdk && \
-    apt-get clean && rm -rf /var/lib/apt/lists/*
-
-RUN echo "Install python related packages" && \
-    pip --no-cache-dir install Pillow h5py ipykernel jupyter matplotlib numpy pandas scipy sklearn && \
-    python -m ipykernel.kernelspec
-
-# Set the locale to fix bash warning: setlocale: LC_ALL: cannot change locale (en_US.UTF-8)
-RUN apt-get update && apt-get install -y --no-install-recommends locales && \
-    apt-get clean && rm -rf /var/lib/apt/lists/*
-RUN locale-gen en_US.UTF-8
-
-
-WORKDIR /workspace
-RUN chmod -R a+w /workspace
\ No newline at end of file
diff --git a/website/docs/userDocs/yarn/docker/pytorch/build-all.sh b/website/docs/userDocs/yarn/docker/pytorch/build-all.sh
deleted file mode 100755
index f0f795c..0000000
--- a/website/docs/userDocs/yarn/docker/pytorch/build-all.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-echo "Building base images"
-
-set -euo pipefail
-
-cd base/ubuntu-18.04
-
-docker build . -f Dockerfile.gpu.pytorch_latest -t pytorch-latest-gpu-base:0.0.1
-
-echo "Finished building base images"
-
-cd ../../with-cifar10-models/ubuntu-18.04
-
-docker build . -f Dockerfile.gpu.pytorch_latest -t pytorch-latest-gpu:0.0.1
diff --git a/website/docs/userDocs/yarn/docker/pytorch/with-cifar10-models/cifar10_tutorial.py b/website/docs/userDocs/yarn/docker/pytorch/with-cifar10-models/cifar10_tutorial.py
deleted file mode 100644
index c3bb991..0000000
--- a/website/docs/userDocs/yarn/docker/pytorch/with-cifar10-models/cifar10_tutorial.py
+++ /dev/null
@@ -1,348 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#    http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# -*- coding: utf-8 -*-
-"""
-Training a Classifier
-=====================
-
-This is it. You have seen how to define neural networks, compute loss and make
-updates to the weights of the network.
-
-Now you might be thinking,
-
-What about data?
-----------------
-
-Generally, when you have to deal with image, text, audio or video data,
-you can use standard python packages that load data into a numpy array.
-Then you can convert this array into a ``torch.*Tensor``.
-
--  For images, packages such as Pillow, OpenCV are useful
--  For audio, packages such as scipy and librosa
--  For text, either raw Python or Cython based loading, or NLTK and
-   SpaCy are useful
-
-Specifically for vision, we have created a package called
-``torchvision``, that has data loaders for common datasets such as
-Imagenet, CIFAR10, MNIST, etc. and data transformers for images, viz.,
-``torchvision.datasets`` and ``torch.utils.data.DataLoader``.
-
-This provides a huge convenience and avoids writing boilerplate code.
-
-For this tutorial, we will use the CIFAR10 dataset.
-It has the classes: ‘airplane’, ‘automobile’, ‘bird’, ‘cat’, ‘deer’,
-‘dog’, ‘frog’, ‘horse’, ‘ship’, ‘truck’. The images in CIFAR-10 are of
-size 3x32x32, i.e. 3-channel color images of 32x32 pixels in size.
-
-.. figure:: /_static/img/cifar10.png
-   :alt: cifar10
-
-   cifar10
-
-
-Training an image classifier
-----------------------------
-
-We will do the following steps in order:
-
-1. Load and normalize the CIFAR10 training and test datasets using
-   ``torchvision``
-2. Define a Convolutional Neural Network
-3. Define a loss function
-4. Train the network on the training data
-5. Test the network on the test data
-
-1. Loading and normalizing CIFAR10
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Using ``torchvision``, it’s extremely easy to load CIFAR10.
-"""
-import torch
-import torchvision
-import torchvision.transforms as transforms
-
-########################################################################
-# The output of torchvision datasets are PILImage images of range [0, 1].
-# We transform them to Tensors of normalized range [-1, 1].
-
-transform = transforms.Compose(
-    [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
-)
-
-trainset = torchvision.datasets.CIFAR10(
-    root="./data", train=True, download=True, transform=transform
-)
-trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2)
-
-testset = torchvision.datasets.CIFAR10(
-    root="./data", train=False, download=True, transform=transform
-)
-testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2)
-
-classes = ("plane", "car", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck")
-
-########################################################################
-# Let us show some of the training images, for fun.
-
-import matplotlib.pyplot as plt
-import numpy as np
-
-# functions to show an image
-
-
-def imshow(img):
-    img = img / 2 + 0.5  # unnormalize
-    npimg = img.numpy()
-    plt.imshow(np.transpose(npimg, (1, 2, 0)))
-    plt.show()
-
-
-# get some random training images
-dataiter = iter(trainloader)
-images, labels = dataiter.next()
-
-# show images
-imshow(torchvision.utils.make_grid(images))
-# print labels
-print(" ".join("%5s" % classes[labels[j]] for j in range(4)))
-
-########################################################################
-# 2. Define a Convolutional Neural Network
-# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-# Copy the neural network from the Neural Networks section before and modify it to
-# take 3-channel images (instead of 1-channel images as it was defined).
-
-import torch.nn as nn
-import torch.nn.functional as F
-
-
-class Net(nn.Module):
-    def __init__(self):
-        super(Net, self).__init__()
-        self.conv1 = nn.Conv2d(3, 6, 5)
-        self.pool = nn.MaxPool2d(2, 2)
-        self.conv2 = nn.Conv2d(6, 16, 5)
-        self.fc1 = nn.Linear(16 * 5 * 5, 120)
-        self.fc2 = nn.Linear(120, 84)
-        self.fc3 = nn.Linear(84, 10)
-
-    def forward(self, x):
-        x = self.pool(F.relu(self.conv1(x)))
-        x = self.pool(F.relu(self.conv2(x)))
-        x = x.view(-1, 16 * 5 * 5)
-        x = F.relu(self.fc1(x))
-        x = F.relu(self.fc2(x))
-        x = self.fc3(x)
-        return x
-
-
-net = Net()
-
-########################################################################
-# 3. Define a Loss function and optimizer
-# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-# Let's use a Classification Cross-Entropy loss and SGD with momentum.
-
-import torch.optim as optim
-
-criterion = nn.CrossEntropyLoss()
-optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
-
-########################################################################
-# 4. Train the network
-# ^^^^^^^^^^^^^^^^^^^^
-#
-# This is when things start to get interesting.
-# We simply have to loop over our data iterator, and feed the inputs to the
-# network and optimize.
-
-for epoch in range(2):  # loop over the dataset multiple times
-
-    running_loss = 0.0
-    for i, data in enumerate(trainloader, 0):
-        # get the inputs
-        inputs, labels = data
-
-        # zero the parameter gradients
-        optimizer.zero_grad()
-
-        # forward + backward + optimize
-        outputs = net(inputs)
-        loss = criterion(outputs, labels)
-        loss.backward()
-        optimizer.step()
-
-        # print statistics
-        running_loss += loss.item()
-        if i % 2000 == 1999:  # print every 2000 mini-batches
-            print("[%d, %5d] loss: %.3f" % (epoch + 1, i + 1, running_loss / 2000))
-            running_loss = 0.0
-
-print("Finished Training")
-
-########################################################################
-# 5. Test the network on the test data
-# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-#
-# We have trained the network for 2 passes over the training dataset.
-# But we need to check if the network has learnt anything at all.
-#
-# We will check this by predicting the class label that the neural network
-# outputs, and checking it against the ground-truth. If the prediction is
-# correct, we add the sample to the list of correct predictions.
-#
-# Okay, first step. Let us display an image from the test set to get familiar.
-
-dataiter = iter(testloader)
-images, labels = dataiter.next()
-
-# print images
-imshow(torchvision.utils.make_grid(images))
-print("GroundTruth: ", " ".join("%5s" % classes[labels[j]] for j in range(4)))
-
-########################################################################
-# Okay, now let us see what the neural network thinks these examples above are:
-
-outputs = net(images)
-
-########################################################################
-# The outputs are energies for the 10 classes.
-# The higher the energy for a class, the more the network
-# thinks that the image is of the particular class.
-# So, let's get the index of the highest energy:
-_, predicted = torch.max(outputs, 1)
-
-print("Predicted: ", " ".join("%5s" % classes[predicted[j]] for j in range(4)))
-
-########################################################################
-# The results seem pretty good.
-#
-# Let us look at how the network performs on the whole dataset.
-
-correct = 0
-total = 0
-with torch.no_grad():
-    for data in testloader:
-        images, labels = data
-        outputs = net(images)
-        _, predicted = torch.max(outputs.data, 1)
-        total += labels.size(0)
-        correct += (predicted == labels).sum().item()
-
-print("Accuracy of the network on the 10000 test images: %d %%" % (100 * correct / total))
-
-########################################################################
-# That looks waaay better than chance, which is 10% accuracy (randomly picking
-# a class out of 10 classes).
-# Seems like the network learnt something.
-#
-# Hmmm, what are the classes that performed well, and the classes that did
-# not perform well:
-
-class_correct = list(0.0 for i in range(10))
-class_total = list(0.0 for i in range(10))
-with torch.no_grad():
-    for data in testloader:
-        images, labels = data
-        outputs = net(images)
-        _, predicted = torch.max(outputs, 1)
-        c = (predicted == labels).squeeze()
-        for i in range(4):
-            label = labels[i]
-            class_correct[label] += c[i].item()
-            class_total[label] += 1
-
-for i in range(10):
-    print("Accuracy of %5s : %2d %%" % (classes[i], 100 * class_correct[i] / class_total[i]))
-
-########################################################################
-# Okay, so what next?
-#
-# How do we run these neural networks on the GPU?
-#
-# Training on GPU
-# ----------------
-# Just like how you transfer a Tensor onto the GPU, you transfer the neural
-# net onto the GPU.
-#
-# Let's first define our device as the first visible cuda device if we have
-# CUDA available:
-
-device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-
-# Assuming that we are on a CUDA machine, this should print a CUDA device:
-
-print(device)
-
-########################################################################
-# The rest of this section assumes that ``device`` is a CUDA device.
-#
-# Then these methods will recursively go over all modules and convert their
-# parameters and buffers to CUDA tensors:
-#
-# .. code:: python
-#
-#     net.to(device)
-#
-#
-# Remember that you will have to send the inputs and targets at every step
-# to the GPU too:
-#
-# .. code:: python
-#
-#         inputs, labels = inputs.to(device), labels.to(device)
-#
-# Why don't I notice a MASSIVE speedup compared to CPU? Because your network
-# is realllly small.
-#
-# **Exercise:** Try increasing the width of your network (argument 2 of
-# the first ``nn.Conv2d``, and argument 1 of the second ``nn.Conv2d`` –
-# they need to be the same number), see what kind of speedup you get.
-#
-# **Goals achieved**:
-#
-# - Understanding PyTorch's Tensor library and neural networks at a high level.
-# - Train a small neural network to classify images
-#
-# Training on multiple GPUs
-# -------------------------
-# If you want to see even more MASSIVE speedup using all of your GPUs,
-# please check out :doc:`data_parallel_tutorial`.
-#
-# Where do I go next?
-# -------------------
-#
-# -  :doc:`Train neural nets to play video games </intermediate/reinforcement_q_learning>`
-# -  `Train a state-of-the-art ResNet network on imagenet`_
-# -  `Train a face generator using Generative Adversarial Networks`_
-# -  `Train a word-level language model using Recurrent LSTM networks`_
-# -  `More examples`_
-# -  `More tutorials`_
-# -  `Discuss PyTorch on the Forums`_
-# -  `Chat with other users on Slack`_
-#
-# .. _Train a state-of-the-art ResNet network on imagenet: https://github.com/pytorch/examples/tree/master/imagenet
-# .. _Train a face generator using Generative Adversarial Networks: https://github.com/pytorch/examples/tree/master/dcgan
-# .. _Train a word-level language model using Recurrent LSTM networks: https://github.com/pytorch/examples/tree/master/word_language_model
-# .. _More examples: https://github.com/pytorch/examples
-# .. _More tutorials: https://github.com/pytorch/tutorials
-# .. _Discuss PyTorch on the Forums: https://discuss.pytorch.org/
-# .. _Chat with other users on Slack: https://pytorch.slack.com/messages/beginner/
-
-# %%%%%%INVISIBLE_CODE_BLOCK%%%%%%
-del dataiter
-# %%%%%%INVISIBLE_CODE_BLOCK%%%%%%
diff --git a/website/docs/userDocs/yarn/docker/pytorch/with-cifar10-models/ubuntu-18.04/Dockerfile.gpu.pytorch_latest b/website/docs/userDocs/yarn/docker/pytorch/with-cifar10-models/ubuntu-18.04/Dockerfile.gpu.pytorch_latest
deleted file mode 100644
index 83e8fde..0000000
--- a/website/docs/userDocs/yarn/docker/pytorch/with-cifar10-models/ubuntu-18.04/Dockerfile.gpu.pytorch_latest
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM pytorch-latest-gpu-base:0.0.1
-
-RUN mkdir -p /test/data
-RUN chmod -R 777 /test
-ADD cifar10_tutorial.py /test/cifar10_tutorial.py
\ No newline at end of file
diff --git a/website/docs/userDocs/yarn/docker/tensorflow/base/ubuntu-18.04/Dockerfile.cpu.tf_1.13.1 b/website/docs/userDocs/yarn/docker/tensorflow/base/ubuntu-18.04/Dockerfile.cpu.tf_1.13.1
deleted file mode 100644
index 0e575e6..0000000
--- a/website/docs/userDocs/yarn/docker/tensorflow/base/ubuntu-18.04/Dockerfile.cpu.tf_1.13.1
+++ /dev/null
@@ -1,75 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM ubuntu:18.04
-
-# Pick up some TF dependencies
-RUN export DEBIAN_FRONTEND=noninteractive && apt-get update && \
-        apt-get install -y --allow-downgrades --no-install-recommends \
-        --allow-change-held-packages --allow-unauthenticated \
-        build-essential libfreetype6-dev libpng-dev \
-        libzmq3-dev pkg-config python python-dev \
-        rsync software-properties-common curl unzip wget grep sed vim iputils-ping net-tools gdb python2.7-dbg tzdata && \
-        apt-get clean && rm -rf /var/lib/apt/lists/*
-
-RUN export DEBIAN_FRONTEND=noninteractive && apt-get update && apt-get install -yq --no-install-recommends \
-        krb5-user libpam-krb5 && \
-        apt-get clean && rm -rf /var/lib/apt/lists/*
-
-RUN wget https://bootstrap.pypa.io/get-pip.py && \
-    python get-pip.py && \
-    rm get-pip.py
-
-RUN echo "Install python related packages" && \
-    apt-get update && \
-    apt-get install -y --no-install-recommends gfortran \
-    # numerical/algebra packages
-    libblas-dev libatlas-base-dev liblapack-dev \
-    # font, image for matplotlib
-    libpng-dev libxft-dev \
-    # for tkinter
-    python-tk libxml2-dev libxslt-dev zlib1g-dev && \
-    apt-get clean && rm -rf /var/lib/apt/lists/*
-
-RUN pip --no-cache-dir install Pillow h5py ipykernel jupyter matplotlib numpy pandas scipy sklearn && \
-    python -m ipykernel.kernelspec
-
-# Install TensorFlow CPU version.
-ENV TENSORFLOW_VERSION="1.13.1"
-RUN pip --no-cache-dir install \
-    http://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-${TENSORFLOW_VERSION}-cp27-none-linux_x86_64.whl
-RUN apt-get update && apt-get install -y --no-install-recommends git && \
-    apt-get clean && rm -rf /var/lib/apt/lists/*
-
-# Install hadoop
-ENV HADOOP_VERSION="3.1.2"
-RUN wget https://archive.apache.org/dist/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}.tar.gz
-# If you are in mainland China, you can use the following command.
-# RUN wget http://mirrors.shu.edu.cn/apache/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}.tar.gz
-
-RUN tar zxf hadoop-${HADOOP_VERSION}.tar.gz
-RUN ln -s hadoop-${HADOOP_VERSION} hadoop-current
-RUN rm hadoop-${HADOOP_VERSION}.tar.gz
-
-ENV JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
-RUN echo "$LOG_TAG Install java8" && \
-    apt-get update && \
-    apt-get install -y --no-install-recommends openjdk-8-jdk && \
-    apt-get clean && rm -rf /var/lib/apt/lists/*
-
-# Set the locale to fix bash warning: setlocale: LC_ALL: cannot change locale (en_US.UTF-8)
-RUN apt-get update && apt-get install -y --no-install-recommends locales && \
-    apt-get clean && rm -rf /var/lib/apt/lists/*
-RUN locale-gen en_US.UTF-8
diff --git a/website/docs/userDocs/yarn/docker/tensorflow/base/ubuntu-18.04/Dockerfile.gpu.tf_1.13.1 b/website/docs/userDocs/yarn/docker/tensorflow/base/ubuntu-18.04/Dockerfile.gpu.tf_1.13.1
deleted file mode 100644
index dc7027c..0000000
--- a/website/docs/userDocs/yarn/docker/tensorflow/base/ubuntu-18.04/Dockerfile.gpu.tf_1.13.1
+++ /dev/null
@@ -1,89 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM nvidia/cuda:10.0-cudnn7-devel-ubuntu18.04
-
-# Pick up some TF dependencies
-RUN export DEBIAN_FRONTEND=noninteractive && apt-get update && \
-        apt-get install -y --allow-downgrades --no-install-recommends \
-        --allow-change-held-packages --allow-unauthenticated \
-        build-essential libfreetype6-dev \
-        libzmq3-dev pkg-config python python-dev \
-        rsync software-properties-common curl unzip wget grep sed vim \
-        iputils-ping net-tools gdb python2.7-dbg tzdata \
-        cuda-command-line-tools-10-0 cuda-cublas-10-0 \
-        cuda-cufft-10-0 cuda-curand-10-0 cuda-cusolver-10-0 \
-        cuda-cusparse-10-0 libcudnn7=7.4.1.5-1+cuda10.0 && \
-        apt-get clean && rm -rf /var/lib/apt/lists/*
-
-# Install TensorRT
-RUN apt-get update && \
-        apt-get install -y --allow-unauthenticated --no-install-recommends \
-        nvinfer-runtime-trt-repo-ubuntu1804-5.0.2-ga-cuda10.0 && \
-        apt-get update && \
-        apt-get install -y --no-install-recommends \
-        libnvinfer5=5.0.2-1+cuda10.0 && \
-        apt-get clean && rm -rf /var/lib/apt/lists/*
-
-
-RUN export DEBIAN_FRONTEND=noninteractive && apt-get update && \
-        apt-get install -yq --no-install-recommends krb5-user libpam-krb5 \
-        && apt-get clean && rm -rf /var/lib/apt/lists/*
-
-RUN wget https://bootstrap.pypa.io/get-pip.py && \
-    python get-pip.py && \
-    rm get-pip.py
-
-RUN echo "Install python related packages" && \
-    apt-get -y update && \
-    apt-get install -y --no-install-recommends gfortran \
-    # numerical/algebra packages
-    libblas-dev libatlas-base-dev liblapack-dev \
-    # font, image for matplotlib
-    libpng-dev libxft-dev \
-    # for tkinter
-    python-tk libxml2-dev libxslt-dev zlib1g-dev && \
-    apt-get clean && rm -rf /var/lib/apt/lists/*
-
-RUN pip --no-cache-dir install Pillow h5py ipykernel jupyter matplotlib numpy pandas scipy sklearn && \
-    python -m ipykernel.kernelspec
-
-# Install TensorFlow GPU version.
-ENV TENSORFLOW_VERSION="1.13.1"
-RUN pip --no-cache-dir install \
-    http://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-${TENSORFLOW_VERSION}-cp27-none-linux_x86_64.whl
-RUN apt-get update && apt-get install -y --no-install-recommends git && \
-    apt-get clean && rm -rf /var/lib/apt/lists/*
-
-# Install hadoop
-ENV HADOOP_VERSION="3.1.2"
-RUN wget https://archive.apache.org/dist/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}.tar.gz
-# If you are in mainland China, you can use the following command.
-# RUN wget http://mirrors.shu.edu.cn/apache/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}.tar.gz
-
-RUN tar zxf hadoop-${HADOOP_VERSION}.tar.gz
-RUN ln -s hadoop-${HADOOP_VERSION} hadoop-current
-RUN rm hadoop-${HADOOP_VERSION}.tar.gz
-
-ENV JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
-RUN echo "$LOG_TAG Install java8" && \
-    apt-get -y update && \
-    apt-get install -y --no-install-recommends openjdk-8-jdk && \
-    rm -rf /var/lib/apt/lists/*
-
-# Set the locale to fix bash warning: setlocale: LC_ALL: cannot change locale (en_US.UTF-8)
-RUN apt-get update && apt-get install -y --no-install-recommends locales && \
-    apt-get clean && rm -rf /var/lib/apt/lists/*
-RUN locale-gen en_US.UTF-8
diff --git a/website/docs/userDocs/yarn/docker/tensorflow/build-all.sh b/website/docs/userDocs/yarn/docker/tensorflow/build-all.sh
deleted file mode 100755
index 5c02379..0000000
--- a/website/docs/userDocs/yarn/docker/tensorflow/build-all.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-echo "Building base images"
-
-set -euo pipefail
-
-cd base/ubuntu-18.04
-
-docker build . -f Dockerfile.cpu.tf_1.13.1 -t tf-1.13.1-cpu-base:0.0.1
-docker build . -f Dockerfile.gpu.tf_1.13.1 -t tf-1.13.1-gpu-base:0.0.1
-
-echo "Finished building base images"
-
-cd ../../with-cifar10-models/ubuntu-18.04
-
-docker build . -f Dockerfile.cpu.tf_1.13.1 -t tf-1.13.1-cpu:0.0.1
-docker build . -f Dockerfile.gpu.tf_1.13.1 -t tf-1.13.1-gpu:0.0.1
-
-cd ../../mnist
-docker build . -f Dockerfile.tony.tf.mnist.tf_1.13.1 -t tony-mnist-tf-1.13.1:0.0.1
diff --git a/website/docs/userDocs/yarn/docker/tensorflow/mnist/Dockerfile.tony.tf.mnist.tf_1.13.1 b/website/docs/userDocs/yarn/docker/tensorflow/mnist/Dockerfile.tony.tf.mnist.tf_1.13.1
deleted file mode 100644
index 86874d4..0000000
--- a/website/docs/userDocs/yarn/docker/tensorflow/mnist/Dockerfile.tony.tf.mnist.tf_1.13.1
+++ /dev/null
@@ -1,72 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM ubuntu:18.04
-ENV HADOOP_VERSION 2.9.2
-ENV HADOOP_URL https://www.apache.org/dist/hadoop/common/hadoop-$HADOOP_VERSION/hadoop-$HADOOP_VERSION.tar.gz
-RUN apt-get update
-RUN apt-get -y install apt-transport-https \
-     ca-certificates \
-     curl \
-     gnupg2 \
-     git \
-     software-properties-common \
-     openjdk-8-jdk vim \
-     wget python3-distutils
-RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
-RUN add-apt-repository \
-   "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
-   $(lsb_release -cs) \
-   stable"
-
-# Download Hadoop binaries.
-RUN set -x \
-    && curl -fSL "$HADOOP_URL" -o /tmp/hadoop.tar.gz \
-    && tar -xvf /tmp/hadoop.tar.gz -C /opt/ \
-    && rm /tmp/hadoop.tar.gz*
-
-# Copy your $HADOOP_CONF_DIR folder as a "hadoopconf" folder in the same dir as the Dockerfile
-# pi@pi-aw:~/apache/submarine/docker$ tree
-# .
-# ├── Dockerfile
-# └── hadoopconf
-#     ├── capacity-scheduler.xml
-#     ├── configuration.xsl
-#     ...
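-# For example (illustrative only; adjust to your cluster's actual conf directory):
-#   cp -r "$HADOOP_CONF_DIR" ./hadoopconf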
-COPY hadoopconf /opt/hadoop-$HADOOP_VERSION/etc/hadoop
-
-# Download and config submarine
-# Note: each RUN starts in the image's default working directory, so a
-# standalone "RUN cd ..." does not affect later instructions.
-RUN git clone https://github.com/apache/submarine.git
-
-RUN submarine/dev-support/mini-submarine/submarine/build_python_virtual_env.sh
-RUN mv venv/ /opt/
-RUN chmod +r -R /opt/venv
-
-# Config Hadoop env
-ENV HADOOP_HOME=/opt/hadoop-$HADOOP_VERSION/
-ENV HADOOP_YARN_HOME=/opt/hadoop-$HADOOP_VERSION/
-ENV HADOOP_HDFS_HOME=/opt/hadoop-$HADOOP_VERSION/
-ENV HADOOP_CONF_DIR=/opt/hadoop-$HADOOP_VERSION/etc/hadoop
-ENV JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
-
-# Create a user; make sure the user and group IDs are the same as on your host
-RUN groupadd -g 5000 hadoop
-RUN useradd -u 1000 -g hadoop pi
-RUN mkdir /home/pi
-RUN chown pi:hadoop /home/pi
-RUN mkdir /tmp/mode
-RUN chmod 777 /tmp/mode
diff --git a/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/Dockerfile.cpu.tf_1.13.1 b/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/Dockerfile.cpu.tf_1.13.1
deleted file mode 100644
index 188e487..0000000
--- a/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/Dockerfile.cpu.tf_1.13.1
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM tf-1.13.1-cpu-base:0.0.1
-
-# Include models
-RUN mkdir /test
-ADD cifar10_estimator_tf_1.13.1 /test/cifar10_estimator
-RUN chown -R nobody /test
\ No newline at end of file
diff --git a/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/Dockerfile.gpu.tf_1.13.1 b/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/Dockerfile.gpu.tf_1.13.1
deleted file mode 100644
index 8819fa6..0000000
--- a/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/Dockerfile.gpu.tf_1.13.1
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM tf-1.13.1-gpu-base:0.0.1
-
-# Include models
-RUN mkdir /test
-ADD cifar10_estimator_tf_1.13.1 /test/cifar10_estimator
-RUN chown -R nobody /test
\ No newline at end of file
diff --git a/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/cifar10_estimator_tf_1.13.1/README.md b/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/cifar10_estimator_tf_1.13.1/README.md
deleted file mode 100644
index 4ca03fa..0000000
--- a/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/cifar10_estimator_tf_1.13.1/README.md
+++ /dev/null
@@ -1,542 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-(Copied from https://github.com/tensorflow/models/tree/master/tutorials/image/cifar10_estimator)
-
-CIFAR-10 is a common benchmark in machine learning for image recognition.
-
-http://www.cs.toronto.edu/~kriz/cifar.html
-
-Code in this directory focuses on how to use TensorFlow Estimators to train and
-evaluate a CIFAR-10 ResNet model on:
-
-* A single host with one CPU;
-* A single host with multiple GPUs;
-* Multiple hosts with CPU or multiple GPUs;
-
-Before trying to run the model, we highly encourage you to read this entire README.
-
-## Prerequisite
-
-1. [Install](https://www.tensorflow.org/install/) TensorFlow version 1.2.1 or
-later.
-
-2. Download the CIFAR-10 dataset and generate TFRecord files using the provided
-script.  The script and associated command below will download the CIFAR-10
-dataset and then generate a TFRecord for the training, validation, and
-evaluation datasets.
-
-```shell
-python generate_cifar10_tfrecords.py --data-dir=${PWD}/cifar-10-data
-```
-
-After running the command above, you should see the following files in the
---data-dir (```ls -R cifar-10-data```):
-
-* train.tfrecords
-* validation.tfrecords
-* eval.tfrecords
-
-
-## Training on a single machine with GPUs or CPU
-
-Run the training on CPU only. After training, it runs the evaluation.
-
-```
-python cifar10_main.py --data-dir=${PWD}/cifar-10-data \
-                       --job-dir=/tmp/cifar10 \
-                       --num-gpus=0 \
-                       --train-steps=1000
-```
-
-Run the model on 2 GPUs using CPU as parameter server. After training, it runs
-the evaluation.
-```
-python cifar10_main.py --data-dir=${PWD}/cifar-10-data \
-                       --job-dir=/tmp/cifar10 \
-                       --num-gpus=2 \
-                       --train-steps=1000
-```
-
-Run the model on 2 GPUs using GPU as parameter server.
-It will run an experiment, which for a local setting basically means it will
-stop training a couple of times to perform evaluation.
-
-```
-python cifar10_main.py --data-dir=${PWD}/cifar-10-data \
-                       --job-dir=/tmp/cifar10 \
-                       --variable-strategy GPU \
-                       --num-gpus=2
-```
-
-There are more command line flags to play with; run
-`python cifar10_main.py --help` for details.
-
-## Run distributed training
-
-### (Optional) Running on Google Cloud Machine Learning Engine
-
-This example can be run on Google Cloud Machine Learning Engine (ML Engine),
-which will configure the environment and take care of running workers,
-parameter servers, and masters in a fault-tolerant way.
-
-To install the command line tool, and set up a project and billing, see the
-quickstart [here](https://cloud.google.com/ml-engine/docs/quickstarts/command-line).
-
-You'll also need a Google Cloud Storage bucket for the data. If you followed the
-instructions above, you can just run:
-
-```
-MY_BUCKET=gs://<my-bucket-name>
-gsutil cp -r ${PWD}/cifar-10-data $MY_BUCKET/
-```
-
-Then run the following command from the `tutorials/image` directory of this
-repository (the parent directory of this README):
-
-```
-gcloud ml-engine jobs submit training cifarmultigpu \
-    --runtime-version 1.2 \
-    --job-dir=$MY_BUCKET/model_dirs/cifarmultigpu \
-    --config cifar10_estimator/cmle_config.yaml \
-    --package-path cifar10_estimator/ \
-    --module-name cifar10_estimator.cifar10_main \
-    -- \
-    --data-dir=$MY_BUCKET/cifar-10-data \
-    --num-gpus=4 \
-    --train-steps=1000
-```
-
-
-### Set TF_CONFIG
-
-Considering that you already have multiple hosts configured, all you need is a
-`TF_CONFIG` environment variable on each host. You can set up the hosts manually
-or check [tensorflow/ecosystem](https://github.com/tensorflow/ecosystem) for
-instructions about how to set up a Cluster.
-
-The `TF_CONFIG` will be used by the `RunConfig` to know the existing hosts and
-their task: `master`, `ps` or `worker`.
-
-Here's an example of `TF_CONFIG`.
-
-```python
-import json
-
-cluster = {'master': ['master-ip:8000'],
-           'ps': ['ps-ip:8000'],
-           'worker': ['worker-ip:8000']}
-
-TF_CONFIG = json.dumps(
-  {'cluster': cluster,
-   'task': {'type': 'master', 'index': 0},
-   'model_dir': 'gs://<bucket_path>/<dir_path>',
-   'environment': 'cloud'
-  })
-```
-
-*Cluster*
-
-A cluster spec, which is basically a dictionary that describes all of the tasks
-in the cluster. More about it [here](https://www.tensorflow.org/deploy/distributed).
-
-In this cluster spec we are defining a cluster with 1 master, 1 ps and 1 worker.
-
-* `ps`: stores the parameters shared by all workers. All workers can
-   read/write/update the model parameters via the ps. As some models are
-   extremely large, the parameters are split among the ps nodes (each ps stores
-   a subset).
-
-* `worker`: does the training.
-
-* `master`: basically a special worker; it does training, but also restores and
-   saves checkpoints and does evaluation.
-
-*Task*
-
-The task defines the role of the current node; in this example the node is the
-master at index 0 in the cluster spec. The task will be different for each
-node. An example of the `TF_CONFIG` for a worker would be:
-
-```python
-import json
-
-cluster = {'master': ['master-ip:8000'],
-           'ps': ['ps-ip:8000'],
-           'worker': ['worker-ip:8000']}
-
-TF_CONFIG = json.dumps(
-  {'cluster': cluster,
-   'task': {'type': 'worker', 'index': 0},
-   'model_dir': 'gs://<bucket_path>/<dir_path>',
-   'environment': 'cloud'
-  })
-```
-
-*Model_dir*
-
-This is the path where the master will save the checkpoints, graph and
-TensorBoard files. For a multi-host environment you may want to use a
-distributed file system; Google Storage and DFS are supported.
-
-*Environment*
-
-By default the environment is *local*; for a distributed setting we need to
-change it to *cloud*.
-
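-As a minimal sketch (the host addresses and bucket path below are illustrative
-placeholders, not values required by this tutorial), the same configuration can
-be exported as the `TF_CONFIG` environment variable on each host before
-launching the training script shown in the next section:
-
-```shell
-# Illustrative only: substitute your real host addresses and model_dir.
-export TF_CONFIG='{
-  "cluster": {"master": ["master-ip:8000"],
-              "ps":     ["ps-ip:8000"],
-              "worker": ["worker-ip:8000"]},
-  "task": {"type": "worker", "index": 0},
-  "model_dir": "gs://<bucket_path>/<dir_path>",
-  "environment": "cloud"
-}'
-```
-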
-### Running script
-
-Once you have a `TF_CONFIG` configured properly on each host you're ready to run
-on distributed settings.
-
-#### Master
-Run this on the master. It runs an Experiment in sync mode on 4 GPUs, using the
-CPU as parameter server, for 40000 steps. It will run evaluation a couple of
-times during training. The num_workers argument is used only to update the
-learning rate correctly. Make sure the model_dir is the same as the one defined
-in the TF_CONFIG.
-
-```shell
-python cifar10_main.py --data-dir=gs://path/cifar-10-data \
-                       --job-dir=gs://path/model_dir/ \
-                       --num-gpus=4 \
-                       --train-steps=40000 \
-                       --sync \
-                       --num-workers=2
-```
-
-*Output:*
-
-```shell
-INFO:tensorflow:Using model_dir in TF_CONFIG: gs://path/model_dir/
-INFO:tensorflow:Using config: {'_save_checkpoints_secs': 600, '_num_ps_replicas': 1, '_keep_checkpoint_max': 5, '_task_type': u'master', '_is_chief': True, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7fd16fb2be10>, '_model_dir': 'gs://path/model_dir/', '_save_checkpoints_steps': None, '_keep_checkpoint_every_n_hours': 10000, '_session_config': intra_op_parallelism_threads: 1
-gpu_options {
-}
-allow_soft_placement: true
-, '_tf_random_seed': None, '_environment': u'cloud', '_num_worker_replicas': 1, '_task_id': 0, '_save_summary_steps': 100, '_tf_config': gpu_options {
-  per_process_gpu_memory_fraction: 1.0
-}
-, '_evaluation_master': '', '_master': u'grpc://master-ip:8000'}
-...
-2017-08-01 19:59:26.496208: I tensorflow/core/common_runtime/gpu/gpu_device.cc:940] Found device 0 with properties:
-name: Tesla K80
-major: 3 minor: 7 memoryClockRate (GHz) 0.8235
-pciBusID 0000:00:04.0
-Total memory: 11.17GiB
-Free memory: 11.09GiB
-2017-08-01 19:59:26.775660: I tensorflow/core/common_runtime/gpu/gpu_device.cc:940] Found device 1 with properties:
-name: Tesla K80
-major: 3 minor: 7 memoryClockRate (GHz) 0.8235
-pciBusID 0000:00:05.0
-Total memory: 11.17GiB
-Free memory: 11.10GiB
-...
-2017-08-01 19:59:29.675171: I tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc:316] Started server with target: grpc://localhost:8000
-INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1/: (?, 16, 32, 32)
-INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_1/: (?, 16, 32, 32)
-INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_2/: (?, 16, 32, 32)
-INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_3/: (?, 16, 32, 32)
-INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_4/: (?, 16, 32, 32)
-INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_5/: (?, 16, 32, 32)
-INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_6/: (?, 16, 32, 32)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1/avg_pool/: (?, 16, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_1/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_2/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_3/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_4/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_1/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_2/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_3/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_4/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_5/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_6/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1/avg_pool/: (?, 32, 8, 8)
-INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1/: (?, 64, 8, 8)
-INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_1/: (?, 64, 8, 8)
-INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_2/: (?, 64, 8, 8)
-INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_3/: (?, 64, 8, 8)
-INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_4/: (?, 64, 8, 8)
-INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_5/: (?, 64, 8, 8)
-INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_6/: (?, 64, 8, 8)
-INFO:tensorflow:image after unit resnet/tower_0/global_avg_pool/: (?, 64)
-INFO:tensorflow:image after unit resnet/tower_0/fully_connected/: (?, 11)
-INFO:tensorflow:SyncReplicasV2: replicas_to_aggregate=1; total_num_replicas=1
-INFO:tensorflow:Create CheckpointSaverHook.
-INFO:tensorflow:Restoring parameters from gs://path/model_dir/model.ckpt-0
-2017-08-01 19:59:37.560775: I tensorflow/core/distributed_runtime/master_session.cc:999] Start master session 156fcb55fe6648d6 with config:
-intra_op_parallelism_threads: 1
-gpu_options {
-  per_process_gpu_memory_fraction: 1
-}
-allow_soft_placement: true
-
-INFO:tensorflow:Saving checkpoints for 1 into gs://path/model_dir/model.ckpt.
-INFO:tensorflow:loss = 1.20682, step = 1
-INFO:tensorflow:loss = 1.20682, learning_rate = 0.1
-INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1/: (?, 16, 32, 32)
-INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_1/: (?, 16, 32, 32)
-INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_2/: (?, 16, 32, 32)
-INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_3/: (?, 16, 32, 32)
-INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_4/: (?, 16, 32, 32)
-INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_5/: (?, 16, 32, 32)
-INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_6/: (?, 16, 32, 32)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1/avg_pool/: (?, 16, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_1/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_2/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_3/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_4/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_5/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_6/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1/avg_pool/: (?, 32, 8, 8)
-INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1/: (?, 64, 8, 8)
-INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_1/: (?, 64, 8, 8)
-INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_2/: (?, 64, 8, 8)
-INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_3/: (?, 64, 8, 8)
-INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_4/: (?, 64, 8, 8)
-INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_5/: (?, 64, 8, 8)
-INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_6/: (?, 64, 8, 8)
-INFO:tensorflow:image after unit resnet/tower_0/global_avg_pool/: (?, 64)
-INFO:tensorflow:image after unit resnet/tower_0/fully_connected/: (?, 11)
-INFO:tensorflow:SyncReplicasV2: replicas_to_aggregate=2; total_num_replicas=2
-INFO:tensorflow:Starting evaluation at 2017-08-01-20:00:14
-2017-08-01 20:00:15.745881: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1030] Creating TensorFlow device (/gpu:0) -> (device: 0, name: Tesla K80, pci bus id: 0000:00:04.0)
-2017-08-01 20:00:15.745949: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1030] Creating TensorFlow device (/gpu:1) -> (device: 1, name: Tesla K80, pci bus id: 0000:00:05.0)
-2017-08-01 20:00:15.745958: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1030] Creating TensorFlow device (/gpu:2) -> (device: 2, name: Tesla K80, pci bus id: 0000:00:06.0)
-2017-08-01 20:00:15.745964: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1030] Creating TensorFlow device (/gpu:3) -> (device: 3, name: Tesla K80, pci bus id: 0000:00:07.0)
-2017-08-01 20:00:15.745969: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1030] Creating TensorFlow device (/gpu:4) -> (device: 4, name: Tesla K80, pci bus id: 0000:00:08.0)
-2017-08-01 20:00:15.745975: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1030] Creating TensorFlow device (/gpu:5) -> (device: 5, name: Tesla K80, pci bus id: 0000:00:09.0)
-2017-08-01 20:00:15.745987: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1030] Creating TensorFlow device (/gpu:6) -> (device: 6, name: Tesla K80, pci bus id: 0000:00:0a.0)
-2017-08-01 20:00:15.745997: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1030] Creating TensorFlow device (/gpu:7) -> (device: 7, name: Tesla K80, pci bus id: 0000:00:0b.0)
-INFO:tensorflow:Restoring parameters from gs://path/model_dir/model.ckpt-10023
-INFO:tensorflow:Evaluation [1/100]
-INFO:tensorflow:Evaluation [2/100]
-INFO:tensorflow:Evaluation [3/100]
-INFO:tensorflow:Evaluation [4/100]
-INFO:tensorflow:Evaluation [5/100]
-INFO:tensorflow:Evaluation [6/100]
-INFO:tensorflow:Evaluation [7/100]
-INFO:tensorflow:Evaluation [8/100]
-INFO:tensorflow:Evaluation [9/100]
-INFO:tensorflow:Evaluation [10/100]
-INFO:tensorflow:Evaluation [11/100]
-INFO:tensorflow:Evaluation [12/100]
-INFO:tensorflow:Evaluation [13/100]
-...
-INFO:tensorflow:Evaluation [100/100]
-INFO:tensorflow:Finished evaluation at 2017-08-01-20:00:31
-INFO:tensorflow:Saving dict for global step 1: accuracy = 0.0994, global_step = 1, loss = 630.425
-```
-
-#### Worker
-
-Run this on the worker. It runs an Experiment in sync mode on 4 GPUs, using the
-CPU as parameter server, for 40000 steps. It will run evaluation a couple of
-times during training. Make sure the model_dir is the same as the one defined
-in the TF_CONFIG.
-
-```shell
-python cifar10_main.py --data-dir=gs://path/cifar-10-data \
-                       --job-dir=gs://path/model_dir/ \
-                       --num-gpus=4 \
-                       --train-steps=40000 \
-                       --sync
-```
-
-*Output:*
-
-```shell
-INFO:tensorflow:Using model_dir in TF_CONFIG: gs://path/model_dir/
-INFO:tensorflow:Using config: {'_save_checkpoints_secs': 600,
-'_num_ps_replicas': 1, '_keep_checkpoint_max': 5, '_task_type': u'worker',
-'_is_chief': False, '_cluster_spec':
-<tensorflow.python.training.server_lib.ClusterSpec object at 0x7f6918438e10>,
-'_model_dir': 'gs://<path>/model_dir/',
-'_save_checkpoints_steps': None, '_keep_checkpoint_every_n_hours': 10000,
-'_session_config': intra_op_parallelism_threads: 1
-gpu_options {
-}
-allow_soft_placement: true
-, '_tf_random_seed': None, '_environment': u'cloud', '_num_worker_replicas': 1,
-'_task_id': 0, '_save_summary_steps': 100, '_tf_config': gpu_options {
-  per_process_gpu_memory_fraction: 1.0
-  }
-...
-2017-08-01 19:59:26.496208: I tensorflow/core/common_runtime/gpu/gpu_device.cc:940] Found device 0 with properties:
-name: Tesla K80
-major: 3 minor: 7 memoryClockRate (GHz) 0.8235
-pciBusID 0000:00:04.0
-Total memory: 11.17GiB
-Free memory: 11.09GiB
-2017-08-01 19:59:26.775660: I tensorflow/core/common_runtime/gpu/gpu_device.cc:940] Found device 1 with properties:
-name: Tesla K80
-major: 3 minor: 7 memoryClockRate (GHz) 0.8235
-pciBusID 0000:00:05.0
-Total memory: 11.17GiB
-Free memory: 11.10GiB
-...
-2017-08-01 19:59:29.675171: I tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc:316] Started server with target: grpc://localhost:8000
-INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1/: (?, 16, 32, 32)
-INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_1/: (?, 16, 32, 32)
-INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_2/: (?, 16, 32, 32)
-INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_3/: (?, 16, 32, 32)
-INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_4/: (?, 16, 32, 32)
-INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_5/: (?, 16, 32, 32)
-INFO:tensorflow:image after unit resnet/tower_0/stage/residual_v1_6/: (?, 16, 32, 32)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1/avg_pool/: (?, 16, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_1/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_2/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_3/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_4/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_1/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_2/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_3/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_4/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_5/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_1/residual_v1_6/: (?, 32, 16, 16)
-INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1/avg_pool/: (?, 32, 8, 8)
-INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1/: (?, 64, 8, 8)
-INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_1/: (?, 64, 8, 8)
-INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_2/: (?, 64, 8, 8)
-INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_3/: (?, 64, 8, 8)
-INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_4/: (?, 64, 8, 8)
-INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_5/: (?, 64, 8, 8)
-INFO:tensorflow:image after unit resnet/tower_0/stage_2/residual_v1_6/: (?, 64, 8, 8)
-INFO:tensorflow:image after unit resnet/tower_0/global_avg_pool/: (?, 64)
-INFO:tensorflow:image after unit resnet/tower_0/fully_connected/: (?, 11)
-INFO:tensorflow:SyncReplicasV2: replicas_to_aggregate=2; total_num_replicas=2
-INFO:tensorflow:Create CheckpointSaverHook.
-2017-07-31 22:38:04.629150: I
-tensorflow/core/distributed_runtime/master.cc:209] CreateSession still waiting
-for response from worker: /job:master/replica:0/task:0
-2017-07-31 22:38:09.263492: I
-tensorflow/core/distributed_runtime/master_session.cc:999] Start master
-session cc58f93b1e259b0c with config:
-intra_op_parallelism_threads: 1
-gpu_options {
-per_process_gpu_memory_fraction: 1
-}
-allow_soft_placement: true
-INFO:tensorflow:loss = 5.82382, step = 0
-INFO:tensorflow:loss = 5.82382, learning_rate = 0.8
-INFO:tensorflow:Average examples/sec: 1116.92 (1116.92), step = 10
-INFO:tensorflow:Average examples/sec: 1233.73 (1377.83), step = 20
-INFO:tensorflow:Average examples/sec: 1485.43 (2509.3), step = 30
-INFO:tensorflow:Average examples/sec: 1680.27 (2770.39), step = 40
-INFO:tensorflow:Average examples/sec: 1825.38 (2788.78), step = 50
-INFO:tensorflow:Average examples/sec: 1929.32 (2697.27), step = 60
-INFO:tensorflow:Average examples/sec: 2015.17 (2749.05), step = 70
-INFO:tensorflow:loss = 37.6272, step = 79 (19.554 sec)
-INFO:tensorflow:loss = 37.6272, learning_rate = 0.8 (19.554 sec)
-INFO:tensorflow:Average examples/sec: 2074.92 (2618.36), step = 80
-INFO:tensorflow:Average examples/sec: 2132.71 (2744.13), step = 90
-INFO:tensorflow:Average examples/sec: 2183.38 (2777.21), step = 100
-INFO:tensorflow:Average examples/sec: 2224.4 (2739.03), step = 110
-INFO:tensorflow:Average examples/sec: 2240.28 (2431.26), step = 120
-INFO:tensorflow:Average examples/sec: 2272.12 (2739.32), step = 130
-INFO:tensorflow:Average examples/sec: 2300.68 (2750.03), step = 140
-INFO:tensorflow:Average examples/sec: 2325.81 (2745.63), step = 150
-INFO:tensorflow:Average examples/sec: 2347.14 (2721.53), step = 160
-INFO:tensorflow:Average examples/sec: 2367.74 (2754.54), step = 170
-INFO:tensorflow:loss = 27.8453, step = 179 (18.893 sec)
-...
-```
-
-#### PS
-
-Run this on the ps. The ps does not do any training, so most of the arguments
-won't affect the execution.
-
-```shell
-python cifar10_main.py --job-dir=gs://path/model_dir/
-```
-
-*Output:*
-
-```shell
-INFO:tensorflow:Using model_dir in TF_CONFIG: gs://path/model_dir/
-INFO:tensorflow:Using config: {'_save_checkpoints_secs': 600, '_num_ps_replicas': 1, '_keep_checkpoint_max': 5, '_task_type': u'ps', '_is_chief': False, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7f48f1addf90>, '_model_dir': 'gs://path/model_dir/', '_save_checkpoints_steps': None, '_keep_checkpoint_every_n_hours': 10000, '_session_config': intra_op_parallelism_threads: 1
-gpu_options {
-}
-allow_soft_placement: true
-, '_tf_random_seed': None, '_environment': u'cloud', '_num_worker_replicas': 1, '_task_id': 0, '_save_summary_steps': 100, '_tf_config': gpu_options {
-  per_process_gpu_memory_fraction: 1.0
-}
-, '_evaluation_master': '', '_master': u'grpc://master-ip:8000'}
-2017-07-31 22:54:58.928088: I tensorflow/core/distributed_runtime/rpc/grpc_channel.cc:215] Initialize GrpcChannelCache for job master -> {0 -> master-ip:8000}
-2017-07-31 22:54:58.928153: I tensorflow/core/distributed_runtime/rpc/grpc_channel.cc:215] Initialize GrpcChannelCache for job ps -> {0 -> localhost:8000}
-2017-07-31 22:54:58.928160: I tensorflow/core/distributed_runtime/rpc/grpc_channel.cc:215] Initialize GrpcChannelCache for job worker -> {0 -> worker-ip:8000}
-2017-07-31 22:54:58.929873: I tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc:316] Started server with target: grpc://localhost:8000
-```
-
-## Visualizing results with TensorBoard
-
-When using Estimators you can also visualize your data in TensorBoard, with no
-changes in your code. You can use TensorBoard to visualize your TensorFlow
-graph, plot quantitative metrics about the execution of your graph, and show
-additional data like images that pass through it.
-
-You can check TensorBoard during training or after it finishes. Just point
-TensorBoard to the `job dir`/model_dir you used to train or evaluate your
-model in the previous steps.
-
-```shell
-tensorboard --logdir="<job dir>"
-```
-
-## Warnings
-
-When running `cifar10_main.py` with `--sync` argument you may see an error
-similar to:
-
-```python
-File "cifar10_main.py", line 538, in <module>
-    tf.app.run()
-File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/platform/app.py", line 48, in run
-    _sys.exit(main(_sys.argv[:1] + flags_passthrough))
-File "cifar10_main.py", line 518, in main
-    hooks), run_config=config)
-File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/learn_runner.py", line 210, in run
-    return _execute_schedule(experiment, schedule)
-File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/learn_runner.py", line 47, in _execute_schedule
-    return task()
-File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/experiment.py", line 501, in train_and_evaluate
-    hooks=self._eval_hooks)
-File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/experiment.py", line 681, in _call_evaluate
-    hooks=hooks)
-File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/estimator/estimator.py", line 292, in evaluate
-    name=name)
-File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/estimator/estimator.py", line 638, in _evaluate_model
-    features, labels, model_fn_lib.ModeKeys.EVAL)
-File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/estimator/estimator.py", line 545, in _call_model_fn
-    features=features, labels=labels, **kwargs)
-File "cifar10_main.py", line 331, in _resnet_model_fn
-    gradvars, global_step=tf.train.get_global_step())
-File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/sync_replicas_optimizer.py", line 252, in apply_gradients
-    variables.global_variables())
-File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/util/tf_should_use.py", line 170, in wrapped
-    return _add_should_use_warning(fn(*args, **kwargs))
-File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/util/tf_should_use.py", line 139, in _add_should_use_warning
-    wrapped = TFShouldUseWarningWrapper(x)
-File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/util/tf_should_use.py", line 96, in __init__
-    stack = [s.strip() for s in traceback.format_stack()]
-```
-
-This should not affect your training, and should be fixed in upcoming releases.
diff --git a/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/cifar10_estimator_tf_1.13.1/cifar10.py b/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/cifar10_estimator_tf_1.13.1/cifar10.py
deleted file mode 100644
index 29883d2..0000000
--- a/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/cifar10_estimator_tf_1.13.1/cifar10.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""CIFAR-10 data set.
-
-See http://www.cs.toronto.edu/~kriz/cifar.html.
-"""
-import os
-
-import tensorflow as tf
-
-HEIGHT = 32
-WIDTH = 32
-DEPTH = 3
-
-
-class Cifar10DataSet(object):
-    """Cifar10 data set.
-
-    Described by http://www.cs.toronto.edu/~kriz/cifar.html.
-    """
-
-    def __init__(self, data_dir, subset="train", use_distortion=True):
-        self.data_dir = data_dir
-        self.subset = subset
-        self.use_distortion = use_distortion
-
-    def get_filenames(self):
-        if self.subset in ["train", "validation", "eval"]:
-            return [os.path.join(self.data_dir, self.subset + ".tfrecords")]
-        else:
-            raise ValueError('Invalid data subset "%s"' % self.subset)
-
-    def parser(self, serialized_example):
-        """Parses a single tf.Example into image and label tensors."""
-        # Dimensions of the images in the CIFAR-10 dataset.
-        # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
-        # input format.
-        features = tf.parse_single_example(
-            serialized_example,
-            features={
-                "image": tf.FixedLenFeature([], tf.string),
-                "label": tf.FixedLenFeature([], tf.int64),
-            },
-        )
-        image = tf.decode_raw(features["image"], tf.uint8)
-        image.set_shape([DEPTH * HEIGHT * WIDTH])
-
-        # Reshape from [depth * height * width] to [depth, height, width].
-        image = tf.cast(
-            tf.transpose(tf.reshape(image, [DEPTH, HEIGHT, WIDTH]), [1, 2, 0]), tf.float32
-        )
-        label = tf.cast(features["label"], tf.int32)
-
-        # Custom preprocessing.
-        image = self.preprocess(image)
-
-        return image, label
-
-    def make_batch(self, batch_size):
-        """Read the images and labels from 'filenames'."""
-        filenames = self.get_filenames()
-        # Repeat infinitely.
-        dataset = tf.data.TFRecordDataset(filenames).repeat()
-
-        # Parse records.
-        dataset = dataset.map(self.parser, num_parallel_calls=batch_size)
-
-        # Potentially shuffle records.
-        if self.subset == "train":
-            min_queue_examples = int(Cifar10DataSet.num_examples_per_epoch(self.subset) * 0.4)
-            # Ensure that the capacity is sufficiently large to provide good random
-            # shuffling.
-            dataset = dataset.shuffle(buffer_size=min_queue_examples + 3 * batch_size)
-
-        # Batch it up.
-        dataset = dataset.batch(batch_size)
-        iterator = dataset.make_one_shot_iterator()
-        image_batch, label_batch = iterator.get_next()
-
-        return image_batch, label_batch
-
-    def preprocess(self, image):
-        """Preprocess a single image in [height, width, depth] layout."""
-        if self.subset == "train" and self.use_distortion:
-            # Pad 4 pixels on each dimension of feature map, done in mini-batch
-            image = tf.image.resize_image_with_crop_or_pad(image, 40, 40)
-            image = tf.random_crop(image, [HEIGHT, WIDTH, DEPTH])
-            image = tf.image.random_flip_left_right(image)
-        return image
-
-    @staticmethod
-    def num_examples_per_epoch(subset="train"):
-        if subset == "train":
-            return 45000
-        elif subset == "validation":
-            return 5000
-        elif subset == "eval":
-            return 10000
-        else:
-            raise ValueError('Invalid data subset "%s"' % subset)
diff --git a/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/cifar10_estimator_tf_1.13.1/cifar10_main.py b/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/cifar10_estimator_tf_1.13.1/cifar10_main.py
deleted file mode 100644
index 91e8e48..0000000
--- a/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/cifar10_estimator_tf_1.13.1/cifar10_main.py
+++ /dev/null
@@ -1,519 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""ResNet model for classifying images from CIFAR-10 dataset.
-
-Support single-host training with one or multiple devices.
-
-ResNet as proposed in:
-Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
-Deep Residual Learning for Image Recognition. arXiv:1512.03385
-
-CIFAR-10 as in:
-http://www.cs.toronto.edu/~kriz/cifar.html
-
-
-"""
-from __future__ import division, print_function
-
-import argparse
-import functools
-import itertools
-import os
-
-import cifar10
-import cifar10_model
-import cifar10_utils
-import numpy as np
-import six
-import tensorflow as tf
-from six.moves import xrange  # pylint: disable=redefined-builtin
-
-tf.logging.set_verbosity(tf.logging.INFO)
-
-
-def get_model_fn(num_gpus, variable_strategy, num_workers):
-    """Returns a function that will build the resnet model."""
-
-    def _resnet_model_fn(features, labels, mode, params):
-        """Resnet model body.
-
-        Support single host, one or more GPU training. Parameter distribution can
-        be either of the following schemes:
-        1. CPU is the parameter server and manages gradient updates.
-        2. Parameters are distributed evenly across all GPUs, and the first GPU
-           manages gradient updates.
-
-        Args:
-          features: a list of tensors, one for each tower
-          labels: a list of tensors, one for each tower
-          mode: ModeKeys.TRAIN or EVAL
-          params: Hyperparameters suitable for tuning
-        Returns:
-          An EstimatorSpec object.
-        """
-        is_training = mode == tf.estimator.ModeKeys.TRAIN
-        weight_decay = params.weight_decay
-        momentum = params.momentum
-
-        tower_features = features
-        tower_labels = labels
-        tower_losses = []
-        tower_gradvars = []
-        tower_preds = []
-
-        # channels first (NCHW) is normally optimal on GPU and channels last (NHWC)
-        # on CPU. The exception is Intel MKL on CPU which is optimal with
-        # channels_last.
-        data_format = params.data_format
-        if not data_format:
-            if num_gpus == 0:
-                data_format = "channels_last"
-            else:
-                data_format = "channels_first"
-
-        if num_gpus == 0:
-            num_devices = 1
-            device_type = "cpu"
-        else:
-            num_devices = num_gpus
-            device_type = "gpu"
-
-        for i in range(num_devices):
-            worker_device = "/{}:{}".format(device_type, i)
-            if variable_strategy == "CPU":
-                device_setter = cifar10_utils.local_device_setter(worker_device=worker_device)
-            elif variable_strategy == "GPU":
-                device_setter = cifar10_utils.local_device_setter(
-                    ps_device_type="gpu",
-                    worker_device=worker_device,
-                    ps_strategy=tf.contrib.training.GreedyLoadBalancingStrategy(
-                        num_gpus, tf.contrib.training.byte_size_load_fn
-                    ),
-                )
-            with tf.variable_scope("resnet", reuse=bool(i != 0)):
-                with tf.name_scope("tower_%d" % i) as name_scope:
-                    with tf.device(device_setter):
-                        loss, gradvars, preds = _tower_fn(
-                            is_training,
-                            weight_decay,
-                            tower_features[i],
-                            tower_labels[i],
-                            data_format,
-                            params.num_layers,
-                            params.batch_norm_decay,
-                            params.batch_norm_epsilon,
-                        )
-                        tower_losses.append(loss)
-                        tower_gradvars.append(gradvars)
-                        tower_preds.append(preds)
-                        if i == 0:
-                            # Only trigger batch_norm moving mean and variance update from
-                            # the 1st tower. Ideally, we should grab the updates from all
-                            # towers but these stats accumulate extremely fast so we can
-                            # ignore the other stats from the other towers without
-                            # significant detriment.
-                            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, name_scope)
-
-        # Now compute global loss and gradients.
-        gradvars = []
-        with tf.name_scope("gradient_averaging"):
-            all_grads = {}
-            for grad, var in itertools.chain(*tower_gradvars):
-                if grad is not None:
-                    all_grads.setdefault(var, []).append(grad)
-            for var, grads in six.iteritems(all_grads):
-                # Average gradients on the same device as the variables
-                # to which they apply.
-                with tf.device(var.device):
-                    if len(grads) == 1:
-                        avg_grad = grads[0]
-                    else:
-                        avg_grad = tf.multiply(tf.add_n(grads), 1.0 / len(grads))
-                gradvars.append((avg_grad, var))
-
-        # Device that runs the ops to apply global gradient updates.
-        consolidation_device = "/gpu:0" if variable_strategy == "GPU" else "/cpu:0"
-        with tf.device(consolidation_device):
-            # Suggested learning rate scheduling from
-            # https://github.com/ppwwyyxx/tensorpack/blob/master/examples/ResNet/cifar10-resnet.py#L155
-            num_batches_per_epoch = cifar10.Cifar10DataSet.num_examples_per_epoch("train") // (
-                params.train_batch_size * num_workers
-            )
-            boundaries = [
-                num_batches_per_epoch * x for x in np.array([82, 123, 300], dtype=np.int64)
-            ]
-            staged_lr = [params.learning_rate * x for x in [1, 0.1, 0.01, 0.002]]
-
-            learning_rate = tf.train.piecewise_constant(
-                tf.train.get_global_step(), boundaries, staged_lr
-            )
-
-            loss = tf.reduce_mean(tower_losses, name="loss")
-
-            examples_sec_hook = cifar10_utils.ExamplesPerSecondHook(
-                params.train_batch_size, every_n_steps=10
-            )
-
-            tensors_to_log = {"learning_rate": learning_rate, "loss": loss}
-
-            logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=100)
-
-            train_hooks = [logging_hook, examples_sec_hook]
-
-            optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=momentum)
-
-            if params.sync:
-                optimizer = tf.train.SyncReplicasOptimizer(
-                    optimizer, replicas_to_aggregate=num_workers
-                )
-                sync_replicas_hook = optimizer.make_session_run_hook(params.is_chief)
-                train_hooks.append(sync_replicas_hook)
-
-            # Create single grouped train op
-            train_op = [optimizer.apply_gradients(gradvars, global_step=tf.train.get_global_step())]
-            train_op.extend(update_ops)
-            train_op = tf.group(*train_op)
-
-            predictions = {
-                "classes": tf.concat([p["classes"] for p in tower_preds], axis=0),
-                "probabilities": tf.concat([p["probabilities"] for p in tower_preds], axis=0),
-            }
-            stacked_labels = tf.concat(labels, axis=0)
-            metrics = {"accuracy": tf.metrics.accuracy(stacked_labels, predictions["classes"])}
-
-        return tf.estimator.EstimatorSpec(
-            mode=mode,
-            predictions=predictions,
-            loss=loss,
-            train_op=train_op,
-            training_hooks=train_hooks,
-            eval_metric_ops=metrics,
-        )
-
-    return _resnet_model_fn
-
-
-def _tower_fn(
-    is_training,
-    weight_decay,
-    feature,
-    label,
-    data_format,
-    num_layers,
-    batch_norm_decay,
-    batch_norm_epsilon,
-):
-    """Build computation tower (Resnet).
-
-    Args:
-      is_training: true if is training graph.
-      weight_decay: weight regularization strength, a float.
-      feature: a Tensor.
-      label: a Tensor.
-      data_format: channels_last (NHWC) or channels_first (NCHW).
-      num_layers: number of layers, an int.
-      batch_norm_decay: decay for batch normalization, a float.
-      batch_norm_epsilon: epsilon for batch normalization, a float.
-
-    Returns:
-      A tuple with the loss for the tower, the gradients and parameters, and
-      predictions.
-
-    """
-    model = cifar10_model.ResNetCifar10(
-        num_layers,
-        batch_norm_decay=batch_norm_decay,
-        batch_norm_epsilon=batch_norm_epsilon,
-        is_training=is_training,
-        data_format=data_format,
-    )
-    logits = model.forward_pass(feature, input_data_format="channels_last")
-    tower_pred = {
-        "classes": tf.argmax(input=logits, axis=1),
-        "probabilities": tf.nn.softmax(logits),
-    }
-
-    tower_loss = tf.losses.sparse_softmax_cross_entropy(logits=logits, labels=label)
-    tower_loss = tf.reduce_mean(tower_loss)
-
-    model_params = tf.trainable_variables()
-    tower_loss += weight_decay * tf.add_n([tf.nn.l2_loss(v) for v in model_params])
-
-    tower_grad = tf.gradients(tower_loss, model_params)
-
-    return tower_loss, zip(tower_grad, model_params), tower_pred
-
-
-def input_fn(data_dir, subset, num_shards, batch_size, use_distortion_for_training=True):
-    """Create input graph for model.
-
-    Args:
-      data_dir: Directory where TFRecords representing the dataset are located.
-      subset: one of 'train', 'validate' and 'eval'.
-      num_shards: num of towers participating in data-parallel training.
-      batch_size: total batch size for training to be divided by the number of
-      shards.
-      use_distortion_for_training: True to use distortions.
-    Returns:
-      two lists of tensors for features and labels, each of num_shards length.
-    """
-    with tf.device("/cpu:0"):
-        use_distortion = subset == "train" and use_distortion_for_training
-        dataset = cifar10.Cifar10DataSet(data_dir, subset, use_distortion)
-        image_batch, label_batch = dataset.make_batch(batch_size)
-        if num_shards <= 1:
-            # No GPU available or only 1 GPU.
-            return [image_batch], [label_batch]
-
-        # Note that passing num=batch_size is safe here, even though
-        # dataset.batch(batch_size) can, in some cases, return fewer than batch_size
-        # examples. This is because it does so only when repeating for a limited
-        # number of epochs, but our dataset repeats forever.
-        image_batch = tf.unstack(image_batch, num=batch_size, axis=0)
-        label_batch = tf.unstack(label_batch, num=batch_size, axis=0)
-        feature_shards = [[] for i in range(num_shards)]
-        label_shards = [[] for i in range(num_shards)]
-        for i in xrange(batch_size):
-            idx = i % num_shards
-            feature_shards[idx].append(image_batch[i])
-            label_shards[idx].append(label_batch[i])
-        feature_shards = [tf.parallel_stack(x) for x in feature_shards]
-        label_shards = [tf.parallel_stack(x) for x in label_shards]
-        return feature_shards, label_shards
-
-
-def get_experiment_fn(data_dir, num_gpus, variable_strategy, use_distortion_for_training=True):
-    """Returns an Experiment function.
-
-    Experiments perform training on several workers in parallel; in other
-    words, experiments know how to invoke train and eval in a sensible
-    fashion for distributed training. Arguments passed directly to this
-    function are not tunable; all other arguments should be passed within
-    tf.HParams, passed to the enclosed function.
-
-    Args:
-        data_dir: str. Location of the data for input_fns.
-        num_gpus: int. Number of GPUs on each worker.
-        variable_strategy: String. CPU to use CPU as the parameter server
-        and GPU to use the GPUs as the parameter server.
-        use_distortion_for_training: bool. See cifar10.Cifar10DataSet.
-    Returns:
-        A function (tf.estimator.RunConfig, tf.contrib.training.HParams) ->
-        tf.contrib.learn.Experiment.
-
-        Suitable for use by tf.contrib.learn.learn_runner, which will run various
-        methods on Experiment (train, evaluate) based on information
-        about the current runner in `run_config`.
-    """
-
-    def _experiment_fn(run_config, hparams):
-        """Returns an Experiment."""
-        # Create estimator.
-        train_input_fn = functools.partial(
-            input_fn,
-            data_dir,
-            subset="train",
-            num_shards=num_gpus,
-            batch_size=hparams.train_batch_size,
-            use_distortion_for_training=use_distortion_for_training,
-        )
-
-        eval_input_fn = functools.partial(
-            input_fn,
-            data_dir,
-            subset="eval",
-            batch_size=hparams.eval_batch_size,
-            num_shards=num_gpus,
-        )
-
-        num_eval_examples = cifar10.Cifar10DataSet.num_examples_per_epoch("eval")
-        if num_eval_examples % hparams.eval_batch_size != 0:
-            raise ValueError("validation set size must be multiple of eval_batch_size")
-
-        train_steps = hparams.train_steps
-        eval_steps = num_eval_examples // hparams.eval_batch_size
-
-        classifier = tf.estimator.Estimator(
-            model_fn=get_model_fn(num_gpus, variable_strategy, run_config.num_worker_replicas or 1),
-            config=run_config,
-            params=hparams,
-        )
-
-        # Create experiment.
-        return tf.contrib.learn.Experiment(
-            classifier,
-            train_input_fn=train_input_fn,
-            eval_input_fn=eval_input_fn,
-            train_steps=train_steps,
-            eval_steps=eval_steps,
-        )
-
-    return _experiment_fn
-
-
-def main(
-    job_dir,
-    data_dir,
-    num_gpus,
-    variable_strategy,
-    use_distortion_for_training,
-    log_device_placement,
-    num_intra_threads,
-    **hparams
-):
-    # The env variable is on deprecation path, default is set to off.
-    os.environ["TF_SYNC_ON_FINISH"] = "0"
-    os.environ["TF_ENABLE_WINOGRAD_NONFUSED"] = "1"
-
-    # Session configuration.
-    sess_config = tf.ConfigProto(
-        allow_soft_placement=True,
-        log_device_placement=log_device_placement,
-        intra_op_parallelism_threads=num_intra_threads,
-        gpu_options=tf.GPUOptions(force_gpu_compatible=True),
-    )
-
-    config = cifar10_utils.RunConfig(session_config=sess_config, model_dir=job_dir)
-    tf.contrib.learn.learn_runner.run(
-        get_experiment_fn(data_dir, num_gpus, variable_strategy, use_distortion_for_training),
-        run_config=config,
-        hparams=tf.contrib.training.HParams(is_chief=config.is_chief, **hparams),
-    )
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()
-    parser.add_argument(
-        "--data-dir",
-        type=str,
-        required=True,
-        help="The directory where the CIFAR-10 input data is stored.",
-    )
-    parser.add_argument(
-        "--job-dir", type=str, required=True, help="The directory where the model will be stored."
-    )
-    parser.add_argument(
-        "--variable-strategy",
-        choices=["CPU", "GPU"],
-        type=str,
-        default="CPU",
-        help="Where to locate variable operations",
-    )
-    parser.add_argument(
-        "--num-gpus",
-        type=int,
-        default=1,
-        help="The number of gpus used. Uses only CPU if set to 0.",
-    )
-    parser.add_argument(
-        "--num-layers", type=int, default=44, help="The number of layers of the model."
-    )
-    parser.add_argument(
-        "--train-steps", type=int, default=80000, help="The number of steps to use for training."
-    )
-    parser.add_argument(
-        "--train-batch-size", type=int, default=128, help="Batch size for training."
-    )
-    parser.add_argument(
-        "--eval-batch-size", type=int, default=100, help="Batch size for validation."
-    )
-    parser.add_argument(
-        "--momentum", type=float, default=0.9, help="Momentum for MomentumOptimizer."
-    )
-    parser.add_argument(
-        "--weight-decay", type=float, default=2e-4, help="Weight decay for convolutions."
-    )
-    parser.add_argument(
-        "--learning-rate",
-        type=float,
-        default=0.1,
-        help="""\
-      This is the initial learning rate value. The learning rate will decrease
-      during training. For more details check the model_fn implementation in
-      this file.\
-      """,
-    )
-    parser.add_argument(
-        "--use-distortion-for-training",
-        type=bool,
-        default=True,
-        help="If doing image distortion for training.",
-    )
-    parser.add_argument(
-        "--sync",
-        action="store_true",
-        default=False,
-        help="""\
-      If present when running in a distributed environment, training will run in sync mode.\
-      """,
-    )
-    parser.add_argument(
-        "--num-intra-threads",
-        type=int,
-        default=0,
-        help="""\
-      Number of threads to use for intra-op parallelism. When training on CPU
-      set to 0 to have the system pick the appropriate number or alternatively
-      set it to the number of physical CPU cores.\
-      """,
-    )
-    parser.add_argument(
-        "--num-inter-threads",
-        type=int,
-        default=0,
-        help="""\
-      Number of threads to use for inter-op parallelism. If set to 0, the
-      system will pick an appropriate number.\
-      """,
-    )
-    parser.add_argument(
-        "--data-format",
-        type=str,
-        default=None,
-        help="""\
-      If not set, the data format best for the training device is used. 
-      Allowed values: channels_first (NCHW) channels_last (NHWC).\
-      """,
-    )
-    parser.add_argument(
-        "--log-device-placement",
-        action="store_true",
-        default=False,
-        help="Whether to log device placement.",
-    )
-    parser.add_argument(
-        "--batch-norm-decay", type=float, default=0.997, help="Decay for batch norm."
-    )
-    parser.add_argument(
-        "--batch-norm-epsilon", type=float, default=1e-5, help="Epsilon for batch norm."
-    )
-    args = parser.parse_args()
-
-    if args.num_gpus > 0:
-        assert tf.test.is_gpu_available(), "Requested GPUs but none found."
-    if args.num_gpus < 0:
-        raise ValueError('Invalid GPU count: "--num-gpus" must be 0 or a positive integer.')
-    if args.num_gpus == 0 and args.variable_strategy == "GPU":
-        raise ValueError(
-            "num-gpus=0, CPU must be used as parameter server. Set--variable-strategy=CPU."
-        )
-    if (args.num_layers - 2) % 6 != 0:
-        raise ValueError("Invalid --num-layers parameter.")
-    if args.num_gpus != 0 and args.train_batch_size % args.num_gpus != 0:
-        raise ValueError("--train-batch-size must be multiple of --num-gpus.")
-    if args.num_gpus != 0 and args.eval_batch_size % args.num_gpus != 0:
-        raise ValueError("--eval-batch-size must be multiple of --num-gpus.")
-
-    main(**vars(args))
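
The model_fn in the removed cifar10_main.py above builds a piecewise-constant learning-rate schedule: the boundaries are the epoch numbers 82, 123 and 300 multiplied by the number of batches per epoch across all workers, and the base rate is scaled by 1, 0.1, 0.01 and 0.002 across the stages. The helper below is a minimal plain-Python sketch of that arithmetic; the function name and defaults are assumptions, not code from the removed file, and it only approximates tf.train.piecewise_constant's behaviour at the exact boundary step.

    def staged_learning_rate(global_step, num_batches_per_epoch, base_lr=0.1):
        # Boundaries in global steps; the last rate applies after the final boundary.
        boundaries = [num_batches_per_epoch * epoch for epoch in (82, 123, 300)]
        rates = [base_lr * factor for factor in (1, 0.1, 0.01, 0.002)]
        for boundary, rate in zip(boundaries, rates[:-1]):
            if global_step < boundary:
                return rate
        return rates[-1]
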
diff --git a/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/cifar10_estimator_tf_1.13.1/cifar10_model.py b/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/cifar10_estimator_tf_1.13.1/cifar10_model.py
deleted file mode 100644
index 019f28b..0000000
--- a/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/cifar10_estimator_tf_1.13.1/cifar10_model.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Model class for Cifar10 Dataset."""
-from __future__ import division, print_function
-
-import model_base
-import tensorflow as tf
-
-
-class ResNetCifar10(model_base.ResNet):
-    """Cifar10 model with ResNetV1 and basic residual block."""
-
-    def __init__(
-        self,
-        num_layers,
-        is_training,
-        batch_norm_decay,
-        batch_norm_epsilon,
-        data_format="channels_first",
-    ):
-        super(ResNetCifar10, self).__init__(
-            is_training, data_format, batch_norm_decay, batch_norm_epsilon
-        )
-        self.n = (num_layers - 2) // 6
-        # Add one in case label starts with 1. No impact if label starts with 0.
-        self.num_classes = 10 + 1
-        self.filters = [16, 16, 32, 64]
-        self.strides = [1, 2, 2]
-
-    def forward_pass(self, x, input_data_format="channels_last"):
-        """Build the core model within the graph."""
-        if self._data_format != input_data_format:
-            if input_data_format == "channels_last":
-                # Computation requires channels_first.
-                x = tf.transpose(x, [0, 3, 1, 2])
-            else:
-                # Computation requires channels_last.
-                x = tf.transpose(x, [0, 2, 3, 1])
-
-        # Image standardization.
-        x = x / 128 - 1
-
-        x = self._conv(x, 3, 16, 1)
-        x = self._batch_norm(x)
-        x = self._relu(x)
-
-        # Use basic (non-bottleneck) block and ResNet V1 (post-activation).
-        res_func = self._residual_v1
-
-        # 3 stages of block stacking.
-        for i in range(3):
-            with tf.name_scope("stage"):
-                for j in range(self.n):
-                    if j == 0:
-                        # First block in a stage, filters and strides may change.
-                        x = res_func(x, 3, self.filters[i], self.filters[i + 1], self.strides[i])
-                    else:
-                        # Following blocks in a stage, constant filters and unit stride.
-                        x = res_func(x, 3, self.filters[i + 1], self.filters[i + 1], 1)
-
-        x = self._global_avg_pool(x)
-        x = self._fully_connected(x, self.num_classes)
-
-        return x
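
The ResNetCifar10 class above sizes the network from --num-layers: it stacks n = (num_layers - 2) // 6 basic residual blocks per stage, over three stages widening 16 -> 32 -> 64, which is why cifar10_main.py rejects depths where (num_layers - 2) % 6 != 0. A minimal sketch of that depth arithmetic follows; the helper name is an assumption, not part of the removed file.

    def blocks_per_stage(num_layers):
        # Valid CIFAR-10 ResNet depths have the form 6n + 2 (e.g. 20, 32, 44, 56).
        if (num_layers - 2) % 6 != 0:
            raise ValueError("num_layers must be of the form 6n + 2")
        return (num_layers - 2) // 6

    # The default --num-layers=44 gives 7 residual blocks in each of the 3 stages.
    assert blocks_per_stage(44) == 7
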
diff --git a/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/cifar10_estimator_tf_1.13.1/cifar10_utils.py b/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/cifar10_estimator_tf_1.13.1/cifar10_utils.py
deleted file mode 100644
index 56d7d91..0000000
--- a/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/cifar10_estimator_tf_1.13.1/cifar10_utils.py
+++ /dev/null
@@ -1,156 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-import collections
-
-import six
-import tensorflow as tf
-from tensorflow.contrib.learn.python.learn import run_config
-from tensorflow.core.framework import node_def_pb2
-from tensorflow.python.framework import device as pydev
-from tensorflow.python.platform import tf_logging as logging
-from tensorflow.python.training import (
-    basic_session_run_hooks,
-    device_setter,
-    session_run_hook,
-    training_util,
-)
-
-
-# TODO(b/64848083) Remove once uid bug is fixed
-class RunConfig(tf.contrib.learn.RunConfig):
-    def uid(self, whitelist=None):
-        """Generates a 'Unique Identifier' based on all internal fields.
-        Caller should use the uid string to check `RunConfig` instance integrity
-        in one session use, but should not rely on the implementation details, which
-        is subject to change.
-        Args:
-          whitelist: A list of the string names of the properties uid should not
-            include. If `None`, defaults to `_DEFAULT_UID_WHITE_LIST`, which
-            includes most properties users are allowed to change.
-        Returns:
-          A uid string.
-        """
-        if whitelist is None:
-            whitelist = run_config._DEFAULT_UID_WHITE_LIST
-
-        state = {k: v for k, v in self.__dict__.items() if not k.startswith("__")}
-        # Pop out the keys in whitelist.
-        for k in whitelist:
-            state.pop("_" + k, None)
-
-        ordered_state = collections.OrderedDict(sorted(state.items(), key=lambda t: t[0]))
-        # For class instances without __repr__, some special care is required.
-        # Otherwise, the object address will be used.
-        if "_cluster_spec" in ordered_state:
-            ordered_state["_cluster_spec"] = collections.OrderedDict(
-                sorted(ordered_state["_cluster_spec"].as_dict().items(), key=lambda t: t[0])
-            )
-        return ", ".join("%s=%r" % (k, v) for (k, v) in six.iteritems(ordered_state))
-
-
-class ExamplesPerSecondHook(session_run_hook.SessionRunHook):
-    """Hook to print out examples per second.
-
-    Total time is tracked and then divided by the total number of steps
-    to get the average step time and then batch_size is used to determine
-    the running average of examples per second. The examples per second for the
-    most recent interval is also logged.
-    """
-
-    def __init__(
-        self,
-        batch_size,
-        every_n_steps=100,
-        every_n_secs=None,
-    ):
-        """Initializer for ExamplesPerSecondHook.
-
-        Args:
-        batch_size: Total batch size used to calculate examples/second from
-        global time.
-        every_n_steps: Log stats every n steps.
-        every_n_secs: Log stats every n seconds.
-        """
-        if (every_n_steps is None) == (every_n_secs is None):
-            raise ValueError("exactly one of every_n_steps and every_n_secs should be provided.")
-        self._timer = basic_session_run_hooks.SecondOrStepTimer(
-            every_steps=every_n_steps, every_secs=every_n_secs
-        )
-
-        self._step_train_time = 0
-        self._total_steps = 0
-        self._batch_size = batch_size
-
-    def begin(self):
-        self._global_step_tensor = training_util.get_global_step()
-        if self._global_step_tensor is None:
-            raise RuntimeError("Global step should be created to use StepCounterHook.")
-
-    def before_run(self, run_context):  # pylint: disable=unused-argument
-        return basic_session_run_hooks.SessionRunArgs(self._global_step_tensor)
-
-    def after_run(self, run_context, run_values):
-        _ = run_context
-
-        global_step = run_values.results
-        if self._timer.should_trigger_for_step(global_step):
-            elapsed_time, elapsed_steps = self._timer.update_last_triggered_step(global_step)
-            if elapsed_time is not None:
-                steps_per_sec = elapsed_steps / elapsed_time
-                self._step_train_time += elapsed_time
-                self._total_steps += elapsed_steps
-
-                average_examples_per_sec = self._batch_size * (
-                    self._total_steps / self._step_train_time
-                )
-                current_examples_per_sec = steps_per_sec * self._batch_size
-                # Average examples/sec followed by current examples/sec
-                logging.info(
-                    "%s: %g (%g), step = %g",
-                    "Average examples/sec",
-                    average_examples_per_sec,
-                    current_examples_per_sec,
-                    self._total_steps,
-                )
-
-
-def local_device_setter(
-    num_devices=1, ps_device_type="cpu", worker_device="/cpu:0", ps_ops=None, ps_strategy=None
-):
-    if ps_ops is None:
-        ps_ops = ["Variable", "VariableV2", "VarHandleOp"]
-
-    if ps_strategy is None:
-        ps_strategy = device_setter._RoundRobinStrategy(num_devices)
-    if not six.callable(ps_strategy):
-        raise TypeError("ps_strategy must be callable")
-
-    def _local_device_chooser(op):
-        current_device = pydev.DeviceSpec.from_string(op.device or "")
-
-        node_def = op if isinstance(op, node_def_pb2.NodeDef) else op.node_def
-        if node_def.op in ps_ops:
-            ps_device_spec = pydev.DeviceSpec.from_string(
-                "/{}:{}".format(ps_device_type, ps_strategy(op))
-            )
-
-            ps_device_spec.merge_from(current_device)
-            return ps_device_spec.to_string()
-        else:
-            worker_device_spec = pydev.DeviceSpec.from_string(worker_device or "")
-            worker_device_spec.merge_from(current_device)
-            return worker_device_spec.to_string()
-
-    return _local_device_chooser
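
ExamplesPerSecondHook above reports throughput as the batch size multiplied by steps over elapsed time, both as a running average and for the most recent logging interval. A minimal sketch of that computation is below; the function name and the sample numbers are assumptions for illustration, not part of the removed file.

    def examples_per_sec(batch_size, total_steps, total_seconds, interval_steps, interval_seconds):
        average = batch_size * (total_steps / total_seconds)        # running average
        current = batch_size * (interval_steps / interval_seconds)  # most recent interval
        return average, current

    # e.g. 128 images per batch and 1000 steps in 50 s -> 2560 examples/sec on average.
    print(examples_per_sec(128, 1000, 50.0, 10, 0.6))
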
diff --git a/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/cifar10_estimator_tf_1.13.1/generate_cifar10_tfrecords.py b/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/cifar10_estimator_tf_1.13.1/generate_cifar10_tfrecords.py
deleted file mode 100644
index ca5fc95..0000000
--- a/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/cifar10_estimator_tf_1.13.1/generate_cifar10_tfrecords.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Read CIFAR-10 data from pickled numpy arrays and writes TFRecords.
-
-Generates tf.train.Example protos and writes them to TFRecord files from the
-python version of the CIFAR-10 dataset downloaded from
-https://www.cs.toronto.edu/~kriz/cifar.html.
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import argparse
-import os
-import sys
-import tarfile
-
-import tensorflow as tf
-from six.moves import cPickle as pickle
-from six.moves import xrange  # pylint: disable=redefined-builtin
-
-CIFAR_FILENAME = "cifar-10-python.tar.gz"
-CIFAR_DOWNLOAD_URL = "https://www.cs.toronto.edu/~kriz/" + CIFAR_FILENAME
-CIFAR_LOCAL_FOLDER = "cifar-10-batches-py"
-
-
-def download_and_extract(data_dir):
-    # download CIFAR-10 if not already downloaded.
-    tf.contrib.learn.datasets.base.maybe_download(CIFAR_FILENAME, data_dir, CIFAR_DOWNLOAD_URL)
-    tarfile.open(os.path.join(data_dir, CIFAR_FILENAME), "r:gz").extractall(data_dir)
-
-
-def _int64_feature(value):
-    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
-
-
-def _bytes_feature(value):
-    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
-
-
-def _get_file_names():
-    """Returns the file names expected to exist in the input_dir."""
-    file_names = {}
-    file_names["train"] = ["data_batch_%d" % i for i in xrange(1, 5)]
-    file_names["validation"] = ["data_batch_5"]
-    file_names["eval"] = ["test_batch"]
-    return file_names
-
-
-def read_pickle_from_file(filename):
-    with tf.gfile.Open(filename, "rb") as f:
-        if sys.version_info >= (3, 0):
-            data_dict = pickle.load(f, encoding="bytes")
-        else:
-            data_dict = pickle.load(f)
-    return data_dict
-
-
-def convert_to_tfrecord(input_files, output_file):
-    """Converts a file to TFRecords."""
-    print("Generating %s" % output_file)
-    with tf.python_io.TFRecordWriter(output_file) as record_writer:
-        for input_file in input_files:
-            data_dict = read_pickle_from_file(input_file)
-            data = data_dict[b"data"]
-            labels = data_dict[b"labels"]
-            num_entries_in_batch = len(labels)
-            for i in range(num_entries_in_batch):
-                example = tf.train.Example(
-                    features=tf.train.Features(
-                        feature={
-                            "image": _bytes_feature(data[i].tobytes()),
-                            "label": _int64_feature(labels[i]),
-                        }
-                    )
-                )
-                record_writer.write(example.SerializeToString())
-
-
-def main(data_dir):
-    print("Download from {} and extract.".format(CIFAR_DOWNLOAD_URL))
-    download_and_extract(data_dir)
-    file_names = _get_file_names()
-    input_dir = os.path.join(data_dir, CIFAR_LOCAL_FOLDER)
-    for mode, files in file_names.items():
-        input_files = [os.path.join(input_dir, f) for f in files]
-        output_file = os.path.join(data_dir, mode + ".tfrecords")
-        try:
-            os.remove(output_file)
-        except OSError:
-            pass
-        # Convert to tf.train.Example and write them to TFRecords.
-        convert_to_tfrecord(input_files, output_file)
-    print("Done!")
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()
-    parser.add_argument(
-        "--data-dir", type=str, default="", help="Directory to download and extract CIFAR-10 to."
-    )
-
-    args = parser.parse_args()
-    main(args.data_dir)
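
Each record written by generate_cifar10_tfrecords.py above carries an "image" bytes feature (the raw pickled CIFAR-10 row) and an int64 "label" feature. The sketch below shows how such a record could be parsed back with the TF 1.x API; the feature names come from the writer above, while the reshape to a channels-first 3x32x32 layout is an assumption about the pickled CIFAR-10 format rather than code from the removed files.

    import tensorflow as tf

    def parse_cifar10_example(serialized):
        features = tf.parse_single_example(
            serialized,
            features={
                "image": tf.FixedLenFeature([], tf.string),
                "label": tf.FixedLenFeature([], tf.int64),
            },
        )
        image = tf.decode_raw(features["image"], tf.uint8)
        image = tf.reshape(image, [3, 32, 32])  # assumed channels-first layout
        return image, features["label"]
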
diff --git a/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/cifar10_estimator_tf_1.13.1/model_base.py b/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/cifar10_estimator_tf_1.13.1/model_base.py
deleted file mode 100644
index 9c468bc..0000000
--- a/website/docs/userDocs/yarn/docker/tensorflow/with-cifar10-models/ubuntu-18.04/cifar10_estimator_tf_1.13.1/model_base.py
+++ /dev/null
@@ -1,207 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""ResNet model.
-
-Related papers:
-https://arxiv.org/pdf/1603.05027v2.pdf
-https://arxiv.org/pdf/1512.03385v1.pdf
-https://arxiv.org/pdf/1605.07146v1.pdf
-"""
-from __future__ import absolute_import, division, print_function
-
-import tensorflow as tf
-
-
-class ResNet(object):
-    """ResNet model."""
-
-    def __init__(self, is_training, data_format, batch_norm_decay, batch_norm_epsilon):
-        """ResNet constructor.
-
-        Args:
-          is_training: if build training or inference model.
-          data_format: the data_format used during computation.
-                       one of 'channels_first' or 'channels_last'.
-        """
-        self._batch_norm_decay = batch_norm_decay
-        self._batch_norm_epsilon = batch_norm_epsilon
-        self._is_training = is_training
-        assert data_format in ("channels_first", "channels_last")
-        self._data_format = data_format
-
-    def forward_pass(self, x):
-        raise NotImplementedError("forward_pass() is implemented in ResNet subclasses")
-
-    def _residual_v1(
-        self, x, kernel_size, in_filter, out_filter, stride, activate_before_residual=False
-    ):
-        """Residual unit with 2 sub layers, using Plan A for shortcut connection."""
-
-        del activate_before_residual
-        with tf.name_scope("residual_v1") as name_scope:
-            orig_x = x
-
-            x = self._conv(x, kernel_size, out_filter, stride)
-            x = self._batch_norm(x)
-            x = self._relu(x)
-
-            x = self._conv(x, kernel_size, out_filter, 1)
-            x = self._batch_norm(x)
-
-            if in_filter != out_filter:
-                orig_x = self._avg_pool(orig_x, stride, stride)
-                pad = (out_filter - in_filter) // 2
-                if self._data_format == "channels_first":
-                    orig_x = tf.pad(orig_x, [[0, 0], [pad, pad], [0, 0], [0, 0]])
-                else:
-                    orig_x = tf.pad(orig_x, [[0, 0], [0, 0], [0, 0], [pad, pad]])
-
-            x = self._relu(tf.add(x, orig_x))
-
-            tf.logging.info("image after unit %s: %s", name_scope, x.get_shape())
-            return x
-
-    def _residual_v2(self, x, in_filter, out_filter, stride, activate_before_residual=False):
-        """Residual unit with 2 sub layers with preactivation, plan A shortcut."""
-
-        with tf.name_scope("residual_v2") as name_scope:
-            if activate_before_residual:
-                x = self._batch_norm(x)
-                x = self._relu(x)
-                orig_x = x
-            else:
-                orig_x = x
-                x = self._batch_norm(x)
-                x = self._relu(x)
-
-            x = self._conv(x, 3, out_filter, stride)
-
-            x = self._batch_norm(x)
-            x = self._relu(x)
-            x = self._conv(x, 3, out_filter, [1, 1, 1, 1])
-
-            if in_filter != out_filter:
-                pad = (out_filter - in_filter) // 2
-                orig_x = self._avg_pool(orig_x, stride, stride)
-                if self._data_format == "channels_first":
-                    orig_x = tf.pad(orig_x, [[0, 0], [pad, pad], [0, 0], [0, 0]])
-                else:
-                    orig_x = tf.pad(orig_x, [[0, 0], [0, 0], [0, 0], [pad, pad]])
-
-            x = tf.add(x, orig_x)
-
-            tf.logging.info("image after unit %s: %s", name_scope, x.get_shape())
-            return x
-
-    def _bottleneck_residual_v2(
-        self, x, in_filter, out_filter, stride, activate_before_residual=False
-    ):
-        """Bottleneck residual unit with 3 sub layers, plan B shortcut."""
-
-        with tf.name_scope("bottle_residual_v2") as name_scope:
-            if activate_before_residual:
-                x = self._batch_norm(x)
-                x = self._relu(x)
-                orig_x = x
-            else:
-                orig_x = x
-                x = self._batch_norm(x)
-                x = self._relu(x)
-
-            x = self._conv(x, 1, out_filter // 4, stride, is_atrous=True)
-
-            x = self._batch_norm(x)
-            x = self._relu(x)
-            # pad when stride isn't unit
-            x = self._conv(x, 3, out_filter // 4, 1, is_atrous=True)
-
-            x = self._batch_norm(x)
-            x = self._relu(x)
-            x = self._conv(x, 1, out_filter, 1, is_atrous=True)
-
-            if in_filter != out_filter:
-                orig_x = self._conv(orig_x, 1, out_filter, stride, is_atrous=True)
-            x = tf.add(x, orig_x)
-
-            tf.logging.info("image after unit %s: %s", name_scope, x.get_shape())
-            return x
-
-    def _conv(self, x, kernel_size, filters, strides, is_atrous=False):
-        """Convolution."""
-
-        padding = "SAME"
-        if not is_atrous and strides > 1:
-            pad = kernel_size - 1
-            pad_beg = pad // 2
-            pad_end = pad - pad_beg
-            if self._data_format == "channels_first":
-                x = tf.pad(x, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])
-            else:
-                x = tf.pad(x, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
-            padding = "VALID"
-        return tf.layers.conv2d(
-            inputs=x,
-            kernel_size=kernel_size,
-            filters=filters,
-            strides=strides,
-            padding=padding,
-            use_bias=False,
-            data_format=self._data_format,
-        )
-
-    def _batch_norm(self, x):
-        if self._data_format == "channels_first":
-            data_format = "NCHW"
-        else:
-            data_format = "NHWC"
-        return tf.contrib.layers.batch_norm(
-            x,
-            decay=self._batch_norm_decay,
-            center=True,
-            scale=True,
-            epsilon=self._batch_norm_epsilon,
-            is_training=self._is_training,
-            fused=True,
-            data_format=data_format,
-        )
-
-    def _relu(self, x):
-        return tf.nn.relu(x)
-
-    def _fully_connected(self, x, out_dim):
-        with tf.name_scope("fully_connected") as name_scope:
-            x = tf.layers.dense(x, out_dim)
-
-        tf.logging.info("image after unit %s: %s", name_scope, x.get_shape())
-        return x
-
-    def _avg_pool(self, x, pool_size, stride):
-        with tf.name_scope("avg_pool") as name_scope:
-            x = tf.layers.average_pooling2d(
-                x, pool_size, stride, "SAME", data_format=self._data_format
-            )
-
-        tf.logging.info("image after unit %s: %s", name_scope, x.get_shape())
-        return x
-
-    def _global_avg_pool(self, x):
-        with tf.name_scope("global_avg_pool") as name_scope:
-            assert x.get_shape().ndims == 4
-            if self._data_format == "channels_first":
-                x = tf.reduce_mean(x, [2, 3])
-            else:
-                x = tf.reduce_mean(x, [1, 2])
-        tf.logging.info("image after unit %s: %s", name_scope, x.get_shape())
-        return x
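
The _residual_v1 unit above uses the "Plan A" shortcut: when a stage widens the channel count, the identity path is average-pooled to match the stride and then zero-padded on the channel axis by (out_filter - in_filter) // 2 on each side, instead of being projected with a 1x1 convolution. A minimal sketch of that padding arithmetic follows; the helper is illustrative only, not part of the removed file.

    def plan_a_channel_padding(in_filter, out_filter):
        # Zero channels added before and after the identity path's existing channels.
        pad = (out_filter - in_filter) // 2
        return pad, pad

    # Widening a stage from 16 to 32 channels pads 8 zero channels on each side.
    assert plan_a_channel_padding(16, 32) == (8, 8)
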
diff --git a/website/docs/userDocs/yarn/docker/tensorflow/zeppelin-notebook-example/Dockerfile.gpu b/website/docs/userDocs/yarn/docker/tensorflow/zeppelin-notebook-example/Dockerfile.gpu
deleted file mode 100644
index 55aa1c9..0000000
--- a/website/docs/userDocs/yarn/docker/tensorflow/zeppelin-notebook-example/Dockerfile.gpu
+++ /dev/null
@@ -1,75 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM nvidia/cuda:9.2-base-ubuntu18.04
-
-RUN  echo "$LOG_TAG update and install basic packages" && \
-     apt-get -y update && apt-get install -y --no-install-recommends \
-        build-essential \
-        curl \
-        libfreetype6-dev \
-        libpng-dev \
-        libzmq3-dev \
-        pkg-config \
-        rsync \
-        software-properties-common \
-        unzip \
-        vim \
-        wget \
-        && \
-    apt-get install -y locales && \
-    locale-gen $LANG && \
-    apt-get clean && \
-    apt -y autoclean && \
-    apt -y dist-upgrade && \
-    apt-get install -y build-essential && \
-    rm -rf /var/lib/apt/lists/*
-
-ENV JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
-RUN echo "$LOG_TAG Install java8" && \
-    apt-get -y update && \
-    apt-get install -y openjdk-8-jdk && \
-    rm -rf /var/lib/apt/lists/*
-
-# Install Zeppelin
-ENV Z_VERSION="0.7.3" \
-    Z_HOME="/zeppelin"
-
-RUN echo "$LOG_TAG Download Zeppelin binary" && \
-    wget -O /tmp/zeppelin-${Z_VERSION}-bin-all.tgz http://archive.apache.org/dist/zeppelin/zeppelin-${Z_VERSION}/zeppelin-${Z_VERSION}-bin-all.tgz && \
-    tar -zxvf /tmp/zeppelin-${Z_VERSION}-bin-all.tgz && \
-    rm -rf /tmp/zeppelin-${Z_VERSION}-bin-all.tgz && \
-    mv /zeppelin-${Z_VERSION}-bin-all ${Z_HOME}
-ENV PATH="${Z_HOME}/bin:${PATH}"
-
-RUN echo "$LOG_TAG Set locale" && \
-    echo "LC_ALL=en_US.UTF-8" >> /etc/environment && \
-    echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen && \
-    echo "LANG=en_US.UTF-8" > /etc/locale.conf && \
-    locale-gen en_US.UTF-8
-
-ENV LANG=en_US.UTF-8 \
-    LC_ALL=en_US.UTF-8
-
-COPY zeppelin-site.xml $Z_HOME/conf/zeppelin-site.xml
-COPY shiro.ini ${Z_HOME}/conf/shiro.ini
-RUN chmod 777 -R ${Z_HOME}
-
-COPY run_container.sh /usr/local/bin/run_container.sh
-RUN chmod 755 /usr/local/bin/run_container.sh
-
-EXPOSE 8080
-CMD ["/usr/local/bin/run_container.sh"]
diff --git a/website/docs/userDocs/yarn/docker/tensorflow/zeppelin-notebook-example/run_container.sh b/website/docs/userDocs/yarn/docker/tensorflow/zeppelin-notebook-example/run_container.sh
deleted file mode 100644
index 8b90920..0000000
--- a/website/docs/userDocs/yarn/docker/tensorflow/zeppelin-notebook-example/run_container.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"${Z_HOME}/bin/zeppelin-daemon.sh" start
-while true; do
-    # loop forever to keep the container running
-    sleep 5
-done
diff --git a/website/docs/userDocs/yarn/docker/tensorflow/zeppelin-notebook-example/shiro.ini b/website/docs/userDocs/yarn/docker/tensorflow/zeppelin-notebook-example/shiro.ini
deleted file mode 100644
index 89f976a..0000000
--- a/website/docs/userDocs/yarn/docker/tensorflow/zeppelin-notebook-example/shiro.ini
+++ /dev/null
@@ -1,120 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-[users]
-# List of users with their password allowed to access Zeppelin.
-# To use a different strategy (LDAP / Database / ...) check the shiro doc at http://shiro.apache.org/configuration.html#Configuration-INISections
-# To enable admin user, uncomment the following line and set an appropriate password.
-admin = admin, admin
-user1 = password2, role1, role2
-user2 = password3, role3
-user3 = password4, role2
-
-# Sample LDAP configuration, for user Authentication, currently tested for single Realm
-[main]
-### A sample for configuring Active Directory Realm
-#activeDirectoryRealm = org.apache.zeppelin.realm.ActiveDirectoryGroupRealm
-#activeDirectoryRealm.systemUsername = userNameA
-
-#use either systemPassword or hadoopSecurityCredentialPath, more details in http://zeppelin.apache.org/docs/latest/security/shiroauthentication.html
-#activeDirectoryRealm.systemPassword = passwordA
-#activeDirectoryRealm.hadoopSecurityCredentialPath = jceks://file/user/zeppelin/zeppelin.jceks
-#activeDirectoryRealm.searchBase = CN=Users,DC=SOME_GROUP,DC=COMPANY,DC=COM
-#activeDirectoryRealm.url = ldap://ldap.test.com:389
-#activeDirectoryRealm.groupRolesMap = "CN=admin,OU=groups,DC=SOME_GROUP,DC=COMPANY,DC=COM":"admin","CN=finance,OU=groups,DC=SOME_GROUP,DC=COMPANY,DC=COM":"finance","CN=hr,OU=groups,DC=SOME_GROUP,DC=COMPANY,DC=COM":"hr"
-#activeDirectoryRealm.authorizationCachingEnabled = false
-
-### A sample for configuring LDAP Directory Realm
-#ldapRealm = org.apache.zeppelin.realm.LdapGroupRealm
-## search base for ldap groups (only relevant for LdapGroupRealm):
-#ldapRealm.contextFactory.environment[ldap.searchBase] = dc=COMPANY,dc=COM
-#ldapRealm.contextFactory.url = ldap://ldap.test.com:389
-#ldapRealm.userDnTemplate = uid={0},ou=Users,dc=COMPANY,dc=COM
-#ldapRealm.contextFactory.authenticationMechanism = simple
-
-### A sample PAM configuration
-#pamRealm=org.apache.zeppelin.realm.PamRealm
-#pamRealm.service=sshd
-
-### A sample for configuring ZeppelinHub Realm
-#zeppelinHubRealm = org.apache.zeppelin.realm.ZeppelinHubRealm
-## Url of ZeppelinHub
-#zeppelinHubRealm.zeppelinhubUrl = https://www.zeppelinhub.com
-#securityManager.realms = $zeppelinHubRealm
-
-## A sample for configuring Knox SSO Realm
-#knoxJwtRealm = org.apache.zeppelin.realm.jwt.KnoxJwtRealm
-#knoxJwtRealm.providerUrl = https://domain.example.com/
-#knoxJwtRealm.login = gateway/knoxsso/knoxauth/login.html
-#knoxJwtRealm.logout = gateway/knoxssout/api/v1/webssout
-#knoxJwtRealm.logoutAPI = true
-#knoxJwtRealm.redirectParam = originalUrl
-#knoxJwtRealm.cookieName = hadoop-jwt
-#knoxJwtRealm.publicKeyPath = /etc/zeppelin/conf/knox-sso.pem
-#
-#knoxJwtRealm.groupPrincipalMapping = group.principal.mapping
-#knoxJwtRealm.principalMapping = principal.mapping
-#authc = org.apache.zeppelin.realm.jwt.KnoxAuthenticationFilter
-
-sessionManager = org.apache.shiro.web.session.mgt.DefaultWebSessionManager
-
-### If caching of user is required then uncomment below lines
-#cacheManager = org.apache.shiro.cache.MemoryConstrainedCacheManager
-#securityManager.cacheManager = $cacheManager
-
-### Enables 'HttpOnly' flag in Zeppelin cookies
-cookie = org.apache.shiro.web.servlet.SimpleCookie
-cookie.name = JSESSIONID
-cookie.httpOnly = true
-### Uncomment the below line only when Zeppelin is running over HTTPS
-#cookie.secure = true
-sessionManager.sessionIdCookie = $cookie
-
-securityManager.sessionManager = $sessionManager
-# 86,400,000 milliseconds = 24 hours
-securityManager.sessionManager.globalSessionTimeout = 86400000
-shiro.loginUrl = /api/login
-
-[roles]
-role1 = *
-role2 = *
-role3 = *
-admin = *
-
-[urls]
-# This section is used for url-based security. For details see the shiro.ini documentation.
-#
-# You can secure interpreter, configuration and credential information by urls.
-# Comment or uncomment the below urls that you want to hide:
-# anon means the access is anonymous.
-# authc means form based auth Security.
-#
-# IMPORTANT: Order matters: URL path expressions are evaluated against an incoming request
-# in the order they are defined and the FIRST MATCH WINS.
-#
-# To allow anonymous access to all but the stated urls,
-# uncomment the second-to-last line (/** = anon) and comment out the last line (/** = authc)
-#
-/api/version = anon
-# Allow all authenticated users to restart interpreters on a notebook page.
-# Comment out the following line if you would like to authorize only admin users to restart interpreters.
-/api/interpreter/setting/restart/** = authc
-/api/interpreter/** = authc, roles[admin]
-/api/configurations/** = authc, roles[admin]
-/api/credential/** = authc, roles[admin]
-#/** = anon
-/** = authc
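
The [urls] section of the removed shiro.ini above is order-sensitive: the first pattern that matches an incoming request decides the filter chain, which is why the catch-all /** = authc rule comes last. The sketch below only illustrates that first-match-wins lookup; it is a deliberate simplification for illustration, not Shiro's real Ant-style matcher.

    URL_FILTERS = [
        ("/api/version", "anon"),
        ("/api/interpreter/setting/restart/**", "authc"),
        ("/api/interpreter/**", "authc, roles[admin]"),
        ("/api/configurations/**", "authc, roles[admin]"),
        ("/api/credential/**", "authc, roles[admin]"),
        ("/**", "authc"),
    ]

    def filter_for(path):
        # Return the filter chain of the first matching pattern, mirroring shiro.ini order.
        for pattern, filters in URL_FILTERS:
            if pattern.endswith("/**"):
                if path.startswith(pattern[:-3]):
                    return filters
            elif path == pattern:
                return filters
        return None

    # The restart rule wins over the admin-only /api/interpreter/** rule because it comes first.
    assert filter_for("/api/interpreter/setting/restart/foo") == "authc"
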
diff --git a/website/docs/userDocs/yarn/docker/tensorflow/zeppelin-notebook-example/zeppelin-site.xml b/website/docs/userDocs/yarn/docker/tensorflow/zeppelin-notebook-example/zeppelin-site.xml
deleted file mode 100644
index 2bde161..0000000
--- a/website/docs/userDocs/yarn/docker/tensorflow/zeppelin-notebook-example/zeppelin-site.xml
+++ /dev/null
@@ -1,569 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<configuration>
-
-  <property>
-    <name>zeppelin.server.addr</name>
-    <value>0.0.0.0</value>
-    <description>Server address</description>
-  </property>
-
-  <property>
-    <name>zeppelin.server.port</name>
-    <value>8080</value>
-    <description>Server port.</description>
-  </property>
-
-  <property>
-    <name>zeppelin.server.ssl.port</name>
-    <value>8443</value>
-    <description>Server ssl port. (used when ssl property is set to true)</description>
-  </property>
-
-  <property>
-    <name>zeppelin.server.context.path</name>
-    <value>/</value>
-    <description>Context Path of the Web Application</description>
-  </property>
-
-  <property>
-    <name>zeppelin.war.tempdir</name>
-    <value>webapps</value>
-    <description>Location of jetty temporary directory</description>
-  </property>
-
-  <property>
-    <name>zeppelin.notebook.dir</name>
-    <value>notebook</value>
-    <description>path or URI for notebook persist</description>
-  </property>
-
-  <property>
-    <name>zeppelin.notebook.homescreen</name>
-    <value></value>
-    <description>id of notebook to be displayed in homescreen. ex) 2A94M5J1Z Empty value displays default home screen</description>
-  </property>
-
-  <property>
-    <name>zeppelin.notebook.homescreen.hide</name>
-    <value>false</value>
-    <description>hide homescreen notebook from list when this value set to true</description>
-  </property>
-
-  <property>
-    <name>zeppelin.notebook.collaborative.mode.enable</name>
-    <value>true</value>
-    <description>Enable collaborative mode</description>
-  </property>
-
-  <!-- Google Cloud Storage notebook storage -->
-  <!--
-  <property>
-    <name>zeppelin.notebook.gcs.dir</name>
-    <value></value>
-    <description>
-      A GCS path in the form gs://bucketname/path/to/dir.
-      Notes are stored at {zeppelin.notebook.gcs.dir}/{notebook-id}/note.json
-   </description>
-  </property>
-
-  <property>
-    <name>zeppelin.notebook.storage</name>
-    <value>org.apache.zeppelin.notebook.repo.GCSNotebookRepo</value>
-    <description>notebook persistence layer implementation</description>
-  </property>
-  -->
-
-  <!-- Amazon S3 notebook storage -->
-  <!-- Creates the following directory structure: s3://{bucket}/{username}/{notebook-id}/note.json -->
-  <!--
-  <property>
-    <name>zeppelin.notebook.s3.user</name>
-    <value>user</value>
-    <description>user name for s3 folder structure</description>
-  </property>
-
-  <property>
-    <name>zeppelin.notebook.s3.bucket</name>
-    <value>zeppelin</value>
-    <description>bucket name for notebook storage</description>
-  </property>
-
-  <property>
-    <name>zeppelin.notebook.s3.endpoint</name>
-    <value>s3.amazonaws.com</value>
-    <description>endpoint for s3 bucket</description>
-  </property>
-
-  <property>
-    <name>zeppelin.notebook.storage</name>
-    <value>org.apache.zeppelin.notebook.repo.S3NotebookRepo</value>
-    <description>notebook persistence layer implementation</description>
-  </property>
-  -->
-
-  <!-- Additionally, encryption is supported for notebook data stored in S3 -->
-  <!-- Use the AWS KMS to encrypt data -->
-  <!-- If used, the EC2 role assigned to the EMR cluster must have rights to use the given key -->
-  <!-- See https://aws.amazon.com/kms/ and http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html -->
-  <!--
-  <property>
-    <name>zeppelin.notebook.s3.kmsKeyID</name>
-    <value>AWS-KMS-Key-UUID</value>
-    <description>AWS KMS key ID used to encrypt notebook data in S3</description>
-  </property>
-  -->
-
-  <!-- provide region of your KMS key -->
-  <!-- See http://docs.aws.amazon.com/general/latest/gr/rande.html#kms_region for region codes names -->
-  <!--
-  <property>
-    <name>zeppelin.notebook.s3.kmsKeyRegion</name>
-    <value>us-east-1</value>
-    <description>AWS KMS key region in your AWS account</description>
-  </property>
-  -->
-
-  <!-- Use a custom encryption materials provider to encrypt data -->
-  <!-- No configuration is given to the provider, so you must use system properties or another means to configure -->
-  <!-- See https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/s3/model/EncryptionMaterialsProvider.html -->
-  <!--
-  <property>
-    <name>zeppelin.notebook.s3.encryptionMaterialsProvider</name>
-    <value>provider implementation class name</value>
-    <description>Custom encryption materials provider used to encrypt notebook data in S3</description>
-  </property>
-  -->
-
-  <!-- Server-side encryption enabled for notebooks -->
-  <!--
-  <property>
-    <name>zeppelin.notebook.s3.sse</name>
-    <value>true</value>
-    <description>Server-side encryption enabled for notebooks</description>
-  </property>
-  -->
-
-  <!-- Optional override to control which signature algorithm should be used to sign AWS requests -->
-  <!-- Set this property to "S3SignerType" if your AWS S3 compatible APIs support only AWS Signature Version 2 such as Ceph. -->
-  <!--
-  <property>
-    <name>zeppelin.notebook.s3.signerOverride</name>
-    <value>S3SignerType</value>
-    <description>optional override to control which signature algorithm should be used to sign AWS requests</description>
-  </property>
-  -->
-
-  <!-- If using Azure for storage use the following settings -->
-  <!--
-  <property>
-    <name>zeppelin.notebook.azure.connectionString</name>
-    <value>DefaultEndpointsProtocol=https;AccountName=<accountName>;AccountKey=<accountKey></value>
-    <description>Azure account credentials</description>
-  </property>
-
-  <property>
-    <name>zeppelin.notebook.azure.share</name>
-    <value>zeppelin</value>
-    <description>share name for notebook storage</description>
-  </property>
-
-  <property>
-    <name>zeppelin.notebook.azure.user</name>
-    <value>user</value>
-    <description>optional user name for Azure folder structure</description>
-  </property>
-
-  <property>
-    <name>zeppelin.notebook.storage</name>
-    <value>org.apache.zeppelin.notebook.repo.AzureNotebookRepo</value>
-    <description>notebook persistence layer implementation</description>
-  </property>
-  -->
-
-  <!-- Notebook storage layer using local file system
-  <property>
-    <name>zeppelin.notebook.storage</name>
-    <value>org.apache.zeppelin.notebook.repo.VFSNotebookRepo</value>
-    <description>local notebook persistence layer implementation</description>
-  </property>
-  -->
-
-  <!-- Notebook storage layer using hadoop compatible file system
-  <property>
-    <name>zeppelin.notebook.storage</name>
-    <value>org.apache.zeppelin.notebook.repo.FileSystemNotebookRepo</value>
-    <description>Hadoop compatible file system notebook persistence layer implementation, such as local file system, hdfs, azure wasb, s3, etc.</description>
-  </property>
-
-  <property>
-    <name>zeppelin.server.kerberos.keytab</name>
-    <value></value>
-    <description>keytab for accessing kerberized hdfs</description>
-  </property>
-
-  <property>
-    <name>zeppelin.server.kerberos.principal</name>
-    <value></value>
-    <description>principal for accessing kerberized hdfs</description>
-  </property>
-  -->
-
-  <!-- For connecting your Zeppelin with ZeppelinHub -->
-  <!--
-  <property>
-    <name>zeppelin.notebook.storage</name>
-    <value>org.apache.zeppelin.notebook.repo.GitNotebookRepo, org.apache.zeppelin.notebook.repo.zeppelinhub.ZeppelinHubRepo</value>
-    <description>two notebook persistence layers (versioned local + ZeppelinHub)</description>
-  </property>
-  -->
-
-  <!-- MongoDB notebook storage -->
-  <!--
-  <property>
-    <name>zeppelin.notebook.storage</name>
-    <value>org.apache.zeppelin.notebook.repo.MongoNotebookRepo</value>
-    <description>notebook persistence layer implementation</description>
-  </property>
-
-  <property>
-    <name>zeppelin.notebook.mongo.uri</name>
-    <value>mongodb://localhost</value>
-    <description>MongoDB connection URI used to connect to a MongoDB database server</description>
-  </property>
-
-  <property>
-    <name>zeppelin.notebook.mongo.database</name>
-    <value>zeppelin</value>
-    <description>database name for notebook storage</description>
-  </property>
-
-  <property>
-    <name>zeppelin.notebook.mongo.collection</name>
-    <value>notes</value>
-    <description>collection name for notebook storage</description>
-  </property>
-
-  <property>
-    <name>zeppelin.notebook.mongo.autoimport</name>
-    <value>false</value>
-    <description>import local notes into MongoDB automatically on startup</description>
-  </property>
-  -->
-
-  <property>
-    <name>zeppelin.notebook.storage</name>
-    <value>org.apache.zeppelin.notebook.repo.GitNotebookRepo</value>
-    <description>versioned notebook persistence layer implementation</description>
-  </property>
-
-  <property>
-    <name>zeppelin.notebook.one.way.sync</name>
-    <value>false</value>
-    <description>If there are multiple notebook storages, should we treat the first one as the only source of truth?</description>
-  </property>
-
-  <property>
-    <name>zeppelin.interpreter.dir</name>
-    <value>interpreter</value>
-    <description>Interpreter implementation base directory</description>
-  </property>
-
-  <property>
-    <name>zeppelin.interpreter.localRepo</name>
-    <value>local-repo</value>
-    <description>Local repository for interpreter's additional dependency loading</description>
-  </property>
-
-  <property>
-    <name>zeppelin.interpreter.dep.mvnRepo</name>
-    <value>http://repo1.maven.org/maven2/</value>
-    <description>Remote principal repository for interpreter's additional dependency loading</description>
-  </property>
-
-  <property>
-    <name>zeppelin.dep.localrepo</name>
-    <value>local-repo</value>
-    <description>Local repository for dependency loader</description>
-  </property>
-
-  <property>
-    <name>zeppelin.helium.node.installer.url</name>
-    <value>https://nodejs.org/dist/</value>
-    <description>Remote Node installer url for Helium dependency loader</description>
-  </property>
-
-  <property>
-    <name>zeppelin.helium.npm.installer.url</name>
-    <value>http://registry.npmjs.org/</value>
-    <description>Remote Npm installer url for Helium dependency loader</description>
-  </property>
-
-  <property>
-    <name>zeppelin.helium.yarnpkg.installer.url</name>
-    <value>https://github.com/yarnpkg/yarn/releases/download/</value>
-    <description>Remote Yarn package installer url for Helium dependency loader</description>
-  </property>
-
-  <property>
-    <name>zeppelin.interpreters</name>
-    <value>org.apache.zeppelin.spark.SparkInterpreter,org.apache.zeppelin.spark.PySparkInterpreter,org.apache.zeppelin.rinterpreter.RRepl,org.apache.zeppelin.rinterpreter.KnitR,org.apache.zeppelin.spark.SparkRInterpreter,org.apache.zeppelin.spark.SparkSqlInterpreter,org.apache.zeppelin.spark.DepInterpreter,org.apache.zeppelin.markdown.Markdown,org.apache.zeppelin.angular.AngularInterpreter,org.apache.zeppelin.shell.ShellInterpreter,org.apache.zeppelin.file.HDFSFileInterpreter,org.apache. [...]
-    <description>Comma separated interpreter configurations. The first interpreter becomes the default</description>
-  </property>
-
-  <property>
-    <name>zeppelin.interpreter.group.order</name>
-    <value>spark,md,angular,sh,livy,alluxio,file,psql,flink,python,ignite,lens,cassandra,geode,kylin,elasticsearch,scalding,jdbc,hbase,bigquery,beam,groovy</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>zeppelin.interpreter.connect.timeout</name>
-    <value>30000</value>
-    <description>Interpreter process connect timeout in msec.</description>
-  </property>
-
-  <property>
-    <name>zeppelin.interpreter.output.limit</name>
-    <value>102400</value>
-    <description>Output message from interpreter exceeding the limit will be truncated</description>
-  </property>
-
-  <property>
-    <name>zeppelin.ssl</name>
-    <value>false</value>
-    <description>Should SSL be used by the servers?</description>
-  </property>
-
-  <property>
-    <name>zeppelin.ssl.client.auth</name>
-    <value>false</value>
-    <description>Should client authentication be used for SSL connections?</description>
-  </property>
-
-  <property>
-    <name>zeppelin.ssl.keystore.path</name>
-    <value>keystore</value>
-    <description>Path to keystore relative to Zeppelin configuration directory</description>
-  </property>
-
-  <property>
-    <name>zeppelin.ssl.keystore.type</name>
-    <value>JKS</value>
-    <description>The format of the given keystore (e.g. JKS or PKCS12)</description>
-  </property>
-
-  <property>
-    <name>zeppelin.ssl.keystore.password</name>
-    <value>change me</value>
-    <description>Keystore password. Can be obfuscated by the Jetty Password tool</description>
-  </property>
-
-  <!--
-  <property>
-    <name>zeppelin.ssl.key.manager.password</name>
-    <value>change me</value>
-    <description>Key Manager password. Defaults to keystore password. Can be obfuscated.</description>
-  </property>
-  -->
-
-  <property>
-    <name>zeppelin.ssl.truststore.path</name>
-    <value>truststore</value>
-    <description>Path to truststore relative to Zeppelin configuration directory. Defaults to the keystore path</description>
-  </property>
-
-  <property>
-    <name>zeppelin.ssl.truststore.type</name>
-    <value>JKS</value>
-    <description>The format of the given truststore (e.g. JKS or PKCS12). Defaults to the same type as the keystore type</description>
-  </property>
-
-  <!--
-  <property>
-    <name>zeppelin.ssl.truststore.password</name>
-    <value>change me</value>
-    <description>Truststore password. Can be obfuscated by the Jetty Password tool. Defaults to the keystore password</description>
-  </property>
-  -->
-
-  <property>
-    <name>zeppelin.server.allowed.origins</name>
-    <value>*</value>
-    <description>Allowed sources for REST and WebSocket requests (i.e. http://onehost:8080,http://otherhost.com). If you leave * you are vulnerable to https://issues.apache.org/jira/browse/ZEPPELIN-173</description>
-  </property>
-
-  <property>
-    <name>zeppelin.anonymous.allowed</name>
-    <value>false</value>
-    <description>Anonymous user allowed by default</description>
-  </property>
-
-  <property>
-    <name>zeppelin.username.force.lowercase</name>
-    <value>false</value>
-    <description>Force convert username case to lower case, useful for Active Directory/LDAP. Default is not to change case</description>
-  </property>
-
-  <property>
-    <name>zeppelin.notebook.default.owner.username</name>
-    <value></value>
-    <description>Set owner role by default</description>
-  </property>
-
-  <property>
-    <name>zeppelin.notebook.public</name>
-    <value>true</value>
-    <description>Make notebook public by default when created, private otherwise</description>
-  </property>
-
-  <property>
-    <name>zeppelin.websocket.max.text.message.size</name>
-    <value>1024000</value>
-    <description>Size in characters of the maximum text message to be received by websocket. Defaults to 1024000</description>
-  </property>
-
-  <property>
-    <name>zeppelin.server.default.dir.allowed</name>
-    <value>false</value>
-    <description>Enable directory listings on server.</description>
-  </property>
-
-  <!--
-  <property>
-    <name>zeppelin.interpreter.lifecyclemanager.class</name>
-    <value>org.apache.zeppelin.interpreter.lifecycle.TimeoutLifecycleManager</value>
-    <description>LifecycleManager class for managing the lifecycle of interpreters, by default interpreter will
-    be closed after timeout</description>
-  </property>
-
-  <property>
-    <name>zeppelin.interpreter.lifecyclemanager.timeout.checkinterval</name>
-    <value>60000</value>
-    <description>Milliseconds of the interval to checking whether interpreter is time out</description>
-  </property>
-
-  <property>
-    <name>zeppelin.interpreter.lifecyclemanager.timeout.threshold</name>
-    <value>3600000</value>
-    <description>Milliseconds of the interpreter timeout threshold, by default it is 1 hour</description>
-  </property>
-  -->
-
-  <!--
-  <property>
-      <name>zeppelin.server.jetty.name</name>
-      <value>Jetty(7.6.0.v20120127)</value>
-      <description>Hardcoding Application Server name to Prevent Fingerprinting</description>
-  </property>
-  -->
-
-  <!--
-  <property>
-      <name>zeppelin.server.jetty.request.header.size</name>
-      <value>8192</value>
-      <description>Http Request Header Size Limit (to prevent HTTP 413)</description>
-  </property>
-  -->
-
-  <!--
-  <property>
-    <name>zeppelin.server.xframe.options</name>
-    <value>SAMEORIGIN</value>
-    <description>The X-Frame-Options HTTP response header can be used to indicate whether or not a browser should be allowed to render a page in a frame/iframe/object.</description>
-  </property>
-  -->
-
-  <!--
-  <property>
-    <name>zeppelin.server.strict.transport</name>
-    <value>max-age=631138519</value>
-    <description>The HTTP Strict-Transport-Security response header is a security feature that lets a web site tell browsers that it should only be communicated with using HTTPS, instead of using HTTP. Enable this when Zeppelin is running on HTTPS. Value is in Seconds, the default value is equivalent to 20 years.</description>
-  </property>
-  -->
-  <!--
-
-  <property>
-    <name>zeppelin.server.xxss.protection</name>
-    <value>1</value>
-    <description>The HTTP X-XSS-Protection response header is a feature of Internet Explorer, Chrome and Safari that stops pages from loading when they detect reflected cross-site scripting (XSS) attacks. When value is set to 1 and a cross-site scripting attack is detected, the browser will sanitize the page (remove the unsafe parts).</description>
-  </property>
-  -->
-
-  <!--
-  <property>
-    <name>zeppelin.interpreter.callback.portRange</name>
-    <value>10000:10010</value>
-  </property>
-  -->
-
-  <!--
-  <property>
-    <name>zeppelin.recovery.storage.class</name>
-    <value>org.apache.zeppelin.interpreter.recovery.FileSystemRecoveryStorage</value>
-    <description>RecoveryStorage implementation</description>
-  </property>
-  -->
-
-  <!--
-  <property>
-    <name>zeppelin.recovery.dir</name>
-    <value>recovery</value>
-    <description>Location where recovery metadata is stored</description>
-  </property>
-  -->
-
-  <!-- GitHub configurations
-  <property>
-    <name>zeppelin.notebook.git.remote.url</name>
-    <value></value>
-    <description>remote Git repository URL</description>
-  </property>
-
-  <property>
-    <name>zeppelin.notebook.git.remote.username</name>
-    <value>token</value>
-    <description>remote Git repository username</description>
-  </property>
-
-  <property>
-    <name>zeppelin.notebook.git.remote.access-token</name>
-    <value></value>
-    <description>remote Git repository password</description>
-  </property>
-
-  <property>
-    <name>zeppelin.notebook.git.remote.origin</name>
-    <value>origin</value>
-    <description>Git repository remote</description>
-  </property>
-
-  <property>
-    <name>zeppelin.notebook.cron.enable</name>
-    <value>false</value>
-    <description>Enable the notebook cron scheduler feature</description>
-  </property>
-  <property>
-    <name>zeppelin.notebook.cron.folders</name>
-    <value></value>
-    <description>Notebook cron folders</description>
-  </property>
-  -->
-</configuration>
diff --git a/website/sidebars.js b/website/sidebars.js
index 74cca2d..d0a5bf3 100644
--- a/website/sidebars.js
+++ b/website/sidebars.js
@@ -50,11 +50,6 @@ module.exports = {
                     ],
                 },
             ],
-            "Administrator Docs": [
-                {
-                    "Submarine on Yarn": ["adminDocs/yarn/README"],
-                },
-            ],
             "Developer Docs": [
                 "devDocs/README",
                 "devDocs/Dependencies",

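For context, the hunk above removes only the "Administrator Docs" category from the sidebar; the neighbouring categories keep their shape. Below is a minimal, hypothetical sketch of how that region of website/sidebars.js could read after the change (the enclosing module.exports layout and the elided entries are assumptions for illustration, not copied from the repository):

    // website/sidebars.js (abridged, hypothetical sketch after this commit)
    module.exports = {
      docs: [
        // ...earlier "User Docs" categories elided...
        {
          // The "Administrator Docs" category removed above used to sit here.
          "Developer Docs": [
            "devDocs/README",
            "devDocs/Dependencies",
            // ...remaining developer doc pages elided...
          ],
        },
      ],
    };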