Posted to commits@kylin.apache.org by xx...@apache.org on 2020/11/08 08:13:07 UTC

[kylin] 07/13: KYLIN-4775 Refactor & Fix HADOOP_CONF_DIR

This is an automated email from the ASF dual-hosted git repository.

xxyu pushed a commit to branch kylin-on-parquet-v2
in repository https://gitbox.apache.org/repos/asf/kylin.git

commit 768a6d662350d4d18a9e2ced4490df439104f537
Author: XiaoxiangYu <xx...@apache.org>
AuthorDate: Fri Oct 23 13:48:35 2020 +0800

    KYLIN-4775 Refactor & Fix HADOOP_CONF_DIR
---
 build/CI/run-ci.sh                                 | 115 +++++++++++++++++
 .../CI/testing/kylin_instances/kylin_instance.yml  |   2 +-
 build/CI/testing/kylin_utils/kylin.py              |   2 +-
 docker/README-cluster.md                           | 143 +++++++++++++++++++++
 docker/{README.md => README-standalone.md}         |  36 ++++--
 docker/README.md                                   | 143 +--------------------
 docker/build_cluster_images.sh                     |  52 ++++----
 docker/docker-compose/others/client-write-read.env |   4 +-
 docker/docker-compose/others/client-write.env      |   4 +-
 .../others/docker-compose-kylin-write-read.yml     |  30 +----
 .../others/docker-compose-kylin-write.yml          |  36 ++----
 .../others/docker-compose-metastore.yml            |   2 -
 docker/docker-compose/others/kylin/README.md       |   2 +
 .../docker-compose/read/docker-compose-hadoop.yml  |  16 +--
 .../docker-compose/read/docker-compose-hbase.yml   |   6 +-
 docker/docker-compose/read/read-hadoop.env         |   4 +-
 .../docker-compose/write/conf/hive/hive-site.xml   |  10 +-
 .../docker-compose/write/docker-compose-hadoop.yml |  22 ++--
 .../docker-compose/write/docker-compose-hbase.yml  |   6 +-
 .../docker-compose/write/docker-compose-hive.yml   |   4 +-
 docker/docker-compose/write/write-hadoop.env       |   4 +-
 docker/dockerfile/cluster/base/Dockerfile          |  21 +--
 docker/dockerfile/cluster/base/entrypoint.sh       |  42 +++---
 docker/dockerfile/cluster/client/Dockerfile        |  22 ++--
 docker/dockerfile/cluster/client/entrypoint.sh     |   6 +-
 docker/dockerfile/cluster/client/run_cli.sh        |   8 +-
 docker/dockerfile/cluster/datanode/Dockerfile      |   2 +-
 docker/dockerfile/cluster/hbase/Dockerfile         |   7 +-
 docker/dockerfile/cluster/hbase/entrypoint.sh      |   2 +-
 docker/dockerfile/cluster/historyserver/Dockerfile |   2 +-
 docker/dockerfile/cluster/hive/Dockerfile          |   2 +-
 docker/dockerfile/cluster/hive/conf/hive-site.xml  |   3 +-
 docker/dockerfile/cluster/hive/entrypoint.sh       |  40 +++---
 docker/dockerfile/cluster/hmaster/Dockerfile       |   2 +-
 docker/dockerfile/cluster/hregionserver/Dockerfile |   2 +-
 docker/dockerfile/cluster/kylin/Dockerfile         |   2 +-
 docker/dockerfile/cluster/namenode/Dockerfile      |   2 +-
 docker/dockerfile/cluster/nodemanager/Dockerfile   |   2 +-
 .../dockerfile/cluster/resourcemanager/Dockerfile  |   2 +-
 docker/header.sh                                   |  27 ++--
 docker/setup_cluster.sh                            |  16 ++-
 41 files changed, 479 insertions(+), 376 deletions(-)

diff --git a/build/CI/run-ci.sh b/build/CI/run-ci.sh
new file mode 100644
index 0000000..574f892
--- /dev/null
+++ b/build/CI/run-ci.sh
@@ -0,0 +1,115 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# 1. Packaging for Kylin binary
+# 2. Deploy hadoop cluster
+# 3. Deploy kylin cluster
+# 4. Run system testing
+# 5. Clean up
+
+INIT_HADOOP=1
+
+###########################################
+###########################################
+# 0. Prepare
+export JAVA_HOME=/usr/local/java
+export PATH=$JAVA_HOME/bin:$PATH
+export PATH=/root/xiaoxiang.yu/INSTALL/anaconda/bin:$PATH
+binary_file=/root/xiaoxiang.yu/BINARY/apache-kylin-3.1.2-SNAPSHOT-bin.tar.gz
+source ~/.bashrc
+pwd
+
+###########################################
+###########################################
+# 1. Package kylin
+
+#TODO
+cd docker/docker-compose/others/kylin
+cp $binary_file .
+tar zxf apache-kylin-3.1.2-SNAPSHOT-bin.tar.gz
+
+mkdir kylin-all
+mkdir kylin-query
+mkdir kylin-job
+
+cp -r apache-kylin-3.1.2-SNAPSHOT-bin/* kylin-all
+cat > kylin-all/conf/kylin.properties <<EOL
+kylin.job.scheduler.default=100
+kylin.server.self-discovery-enabled=true
+EOL
+
+cp -r apache-kylin-3.1.2-SNAPSHOT-bin/* kylin-query
+cat > kylin-query/conf/kylin.properties <<EOL
+kylin.job.scheduler.default=100
+kylin.server.self-discovery-enabled=true
+EOL
+
+cp -r apache-kylin-3.1.2-SNAPSHOT-bin/* kylin-job
+cat > kylin-job/conf/kylin.properties <<EOL
+kylin.job.scheduler.default=100
+kylin.server.self-discovery-enabled=true
+EOL
+
+cd -
+
+###########################################
+###########################################
+# 2. Deploy Hadoop
+
+if [ "$INIT_HADOOP" = "1" ];
+then
+    echo "Restart Hadoop cluster."
+    cd docker
+
+    bash stop_cluster.sh
+
+    bash setup_cluster.sh --cluster_mode write --hadoop_version 2.8.5 --hive_version 1.2.2 \
+      --enable_hbase yes --hbase_version 1.1.2 --enable_ldap no
+    cd ..
+else
+    echo "Do NOT restart Hadoop cluster."
+fi;
+
+docker ps
+
+###########################################
+###########################################
+# 3. Deploy Kylin
+
+# TODO
+
+###########################################
+###########################################
+# 4. Run test
+
+echo "Wait about 6 minutes ..."
+sleep 360
+
+cd build/CI/testing
+pip install -r requirements.txt
+gauge run --tags 3.x
+cd ..
+
+###########################################
+###########################################
+# 5. Clean up
+
+# TODO
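
Note on step 4: the script currently sleeps a fixed six minutes before running the gauge suite. A minimal sketch of a readiness poll that serves the same purpose, assuming the Kylin web UI is reachable at localhost:7070 as configured in kylin_instance.yml below (the loop itself is illustrative, not part of this commit):

```shell
# Poll the Kylin web UI for up to ~10 minutes instead of sleeping a fixed interval.
for i in $(seq 1 60); do
    if curl -sf http://localhost:7070/kylin/ > /dev/null; then
        echo "Kylin is up after ~$((i * 10))s"
        break
    fi
    sleep 10
done
```
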
diff --git a/build/CI/testing/kylin_instances/kylin_instance.yml b/build/CI/testing/kylin_instances/kylin_instance.yml
index ca76d00..fe6fdd1 100644
--- a/build/CI/testing/kylin_instances/kylin_instance.yml
+++ b/build/CI/testing/kylin_instances/kylin_instance.yml
@@ -1,6 +1,6 @@
 ---
 # All mode
-- host: kylin-all
+- host: localhost
   port: 7070
   version: 3.x
   hadoop_platform: HDP2.4
diff --git a/build/CI/testing/kylin_utils/kylin.py b/build/CI/testing/kylin_utils/kylin.py
index 252ce21..164f2ca 100644
--- a/build/CI/testing/kylin_utils/kylin.py
+++ b/build/CI/testing/kylin_utils/kylin.py
@@ -95,7 +95,7 @@ class KylinHttpClient(BasicHttpClient):  # pylint: disable=too-many-public-metho
         resp = self._request('DELETE', url)
         return resp
 
-    def load_table(self, project_name, tables, calculate=True):
+    def load_table(self, project_name, tables, calculate=False):
         """
         load or reload table api
         :param calculate: Default is True
diff --git a/docker/README-cluster.md b/docker/README-cluster.md
new file mode 100644
index 0000000..f90ce3b
--- /dev/null
+++ b/docker/README-cluster.md
@@ -0,0 +1,143 @@
+# Kylin deployment by docker-compose for CI/CD
+
+## Background
+
+To provide Hadoop cluster(s) for system-level testing without manual deployment, covering complex features such as read/write separation, we provide a docker-compose based setup that makes CI/CD easy to achieve.
+
+## Prepare
+
+- Install the latest docker & docker-compose.
+
+- Check that the following ports are free:
+
+Port  | Component | Comment
+----- | --------- | -------
+7070  | Kylin     | All
+7071  | Kylin     | Job
+7072  | Kylin     | Query
+8088  | Yarn      | -
+16010 | HBase     | -
+50070 | HDFS      | -
+
+- Clone the source code
+
+```shell 
+git clone
+cd kylin/docker
+```
+
+### How to start Hadoop cluster
+
+- Build and start a single Hadoop 2.8 cluster
+
+```shell
+bash setup_cluster.sh --cluster_mode write --hadoop_version 2.8.5 --hive_version 1.2.2 \
+    --enable_hbase yes --hbase_version 1.1.2 --enable_ldap no
+```
+
+## Docker Containers
+
+- docker images
+
+```shell 
+root@open-source:/home/ubuntu/xiaoxiang.yu/docker# docker images | grep apachekylin
+apachekylin/kylin-client                   hadoop_2.8.5_hive_1.2.2_hbase_1.1.2   728d1cd89f46        12 hours ago        2.47GB
+apachekylin/kylin-hbase-regionserver       hbase_1.1.2                           41d3a6cd15ec        12 hours ago        1.13GB
+apachekylin/kylin-hbase-master             hbase_1.1.2                           848831eda695        12 hours ago        1.13GB
+apachekylin/kylin-hbase-base               hbase_1.1.2                           f6b9e2beb88e        12 hours ago        1.13GB
+apachekylin/kylin-hive                     hive_1.2.2_hadoop_2.8.5               eb8220ea58f0        12 hours ago        1.83GB
+apachekylin/kylin-hadoop-historyserver     hadoop_2.8.5                          f93b54c430f5        12 hours ago        1.63GB
+apachekylin/kylin-hadoop-nodemanager       hadoop_2.8.5                          88a0f4651047        12 hours ago        1.63GB
+apachekylin/kylin-hadoop-resourcemanager   hadoop_2.8.5                          32a58e854b6e        12 hours ago        1.63GB
+apachekylin/kylin-hadoop-datanode          hadoop_2.8.5                          5855d6a0a8d3        12 hours ago        1.63GB
+apachekylin/kylin-hadoop-namenode          hadoop_2.8.5                          4485f9d2beff        12 hours ago        1.63GB
+apachekylin/kylin-hadoop-base              hadoop_2.8.5                          1b1605941562        12 hours ago        1.63GB
+apachekylin/apache-kylin-standalone        3.1.0                                 2ce49ae43b7e        3 months ago        2.56GB
+```
+
+- docker containers
+
+```shell
+root@open-source:/home/ubuntu/xiaoxiang.yu/docker# docker ps
+CONTAINER ID        IMAGE                                                          COMMAND                  CREATED             STATUS                             PORTS                                                        NAMES
+4881c9b06eff        apachekylin/kylin-client:hadoop_2.8.5_hive_1.2.2_hbase_1.1.2   "/run_cli.sh"            8 seconds ago       Up 4 seconds                       0.0.0.0:7071->7070/tcp                                       kylin-job
+4faed91e3b52        apachekylin/kylin-client:hadoop_2.8.5_hive_1.2.2_hbase_1.1.2   "/run_cli.sh"            8 seconds ago       Up 5 seconds                       0.0.0.0:7072->7070/tcp                                       kylin-query
+b215230ab964        apachekylin/kylin-client:hadoop_2.8.5_hive_1.2.2_hbase_1.1.2   "/run_cli.sh"            8 seconds ago       Up 5 seconds                       0.0.0.0:7070->7070/tcp                                       kylin-all
+64f77396e9fb        apachekylin/kylin-hbase-regionserver:hbase_1.1.2               "/opt/entrypoint/hba…"   12 seconds ago      Up 8 seconds                       16020/tcp, 16030/tcp                                         write-hbase-regionserver1
+c263387ae9dd        apachekylin/kylin-hbase-regionserver:hbase_1.1.2               "/opt/entrypoint/hba…"   12 seconds ago      Up 10 seconds                      16020/tcp, 16030/tcp                                         write-hbase-regionserver2
+9721df1d412f        apachekylin/kylin-hbase-master:hbase_1.1.2                     "/opt/entrypoint/hba…"   12 seconds ago      Up 9 seconds                       16000/tcp, 0.0.0.0:16010->16010/tcp                          write-hbase-master
+ee859d1706ba        apachekylin/kylin-hive:hive_1.2.2_hadoop_2.8.5                 "/opt/entrypoint/hiv…"   20 seconds ago      Up 17 seconds                      0.0.0.0:10000->10000/tcp, 10002/tcp                          write-hive-server
+b9ef97438912        apachekylin/kylin-hive:hive_1.2.2_hadoop_2.8.5                 "/opt/entrypoint/hiv…"   20 seconds ago      Up 16 seconds                      9083/tcp, 10000/tcp, 10002/tcp                               write-hive-metastore
+edf687ecb3f0        mysql:5.7.24                                                   "docker-entrypoint.s…"   23 seconds ago      Up 20 seconds                      0.0.0.0:3306->3306/tcp, 33060/tcp                            metastore-db
+7f63c83dcb63        zookeeper:3.4.10                                               "/docker-entrypoint.…"   25 seconds ago      Up 23 seconds                      2888/tcp, 0.0.0.0:2181->2181/tcp, 3888/tcp                   write-zookeeper
+aaf514d200e0        apachekylin/kylin-hadoop-datanode:hadoop_2.8.5                 "/opt/entrypoint/had…"   28 seconds ago      Up 26 seconds                      50075/tcp                                                    write-datanode1
+6a73601eba35        apachekylin/kylin-hadoop-datanode:hadoop_2.8.5                 "/opt/entrypoint/had…"   33 seconds ago      Up 28 seconds                      50075/tcp                                                    write-datanode3
+934b5e7c8c08        apachekylin/kylin-hadoop-resourcemanager:hadoop_2.8.5          "/opt/entrypoint/had…"   33 seconds ago      Up 26 seconds (health: starting)   0.0.0.0:8088->8088/tcp                                       write-resourcemanager
+6405614c2b06        apachekylin/kylin-hadoop-nodemanager:hadoop_2.8.5              "/opt/entrypoint/had…"   33 seconds ago      Up 30 seconds (health: starting)   8042/tcp                                                     write-nodemanager2
+e004fc605295        apachekylin/kylin-hadoop-namenode:hadoop_2.8.5                 "/opt/entrypoint/had…"   33 seconds ago      Up 28 seconds (health: starting)   0.0.0.0:9870->9870/tcp, 8020/tcp, 0.0.0.0:50070->50070/tcp   write-namenode
+743105698b0f        apachekylin/kylin-hadoop-historyserver:hadoop_2.8.5            "/opt/entrypoint/had…"   33 seconds ago      Up 29 seconds (health: starting)   0.0.0.0:8188->8188/tcp                                       write-historyserver
+1b38135aeb2a        apachekylin/kylin-hadoop-nodemanager:hadoop_2.8.5              "/opt/entrypoint/had…"   33 seconds ago      Up 31 seconds (health: starting)   8042/tcp                                                     write-nodemanager1
+7f53f5a84533        apachekylin/kylin-hadoop-datanode:hadoop_2.8.5                 "/opt/entrypoint/had…"   33 seconds ago      Up 29 seconds                      50075/tcp                                                    write-datanode2
+``` 
+
+- Edit /etc/hosts so that the web UIs can be reached by hostname:
+
+```shell 
+10.1.2.41 write-namenode
+10.1.2.41 write-resourcemanager
+10.1.2.41 write-hbase-master 
+10.1.2.41 write-hive-server
+10.1.2.41 write-hive-metastore
+10.1.2.41 write-zookeeper
+10.1.2.41 metastore-db
+10.1.2.41 kylin-job
+10.1.2.41 kylin-query
+10.1.2.41 kylin-all
+```
+
+### Hadoop cluster information
+
+-  Support Matrix
+
+Hadoop Version | Hive Version | HBase Version | Verified
+-------------- | ------------ | ------------- | -----------
+2.8.5          | 1.2.2        | 1.1.2         | Yes
+3.1.4          | 2.3.7        | 2.2.6         | In progress
+
+- Component
+
+Hostname                  | URL                                        | Tag       | Comment
+--------------------------|--------------------------------------------|-----------|--------
+write-namenode            | http://write-namenode:50070                | HDFS      |
+write-datanode1           |                                            | HDFS      |
+write-datanode2           |                                            | HDFS      |
+write-datanode3           |                                            | HDFS      |
+write-resourcemanager     | http://write-resourcemanager:8088/cluster  | YARN      |
+write-nodemanager1        |                                            | YARN      |
+write-nodemanager2        |                                            | YARN      |
+write-historyserver       |                                            | YARN      |
+write-hbase-master        | http://write-hbase-master:16010            | HBase     |
+write-hbase-regionserver1 |                                            | HBase     |
+write-hbase-regionserver2 |                                            | HBase     |
+write-hive-server         |                                            | Hive      |
+write-hive-metastore      |                                            | Hive      |
+write-zookeeper           |                                            | Zookeeper |
+metastore-db              |                                            | RDBMS     |
+kylin-job                 | http://kylin-job:7071/kylin                | Kylin     |
+kylin-query               | http://kylin-query:7072/kylin              | Kylin     |
+kylin-all                 | http://kylin-all:7070/kylin                | Kylin     |
+
+
+## System Testing
+### How to start Kylin
+
+```shell 
+# Copy the Kylin binary package (kylin.tar.gz) into docker/docker-compose/others/kylin, then unpack it:
+
+cp kylin.tar.gz /root/xiaoxiang.yu/kylin/docker/docker-compose/others/kylin
+tar zxf kylin.tar.gz
+
+```
\ No newline at end of file
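
For reference, a sketch of the layout expected by the compose files, following the steps in build/CI/run-ci.sh added in this commit (one directory per Kylin role):

```shell
tar zxf kylin.tar.gz
for role in kylin-all kylin-job kylin-query; do
    mkdir -p ${role}
    cp -r apache-kylin-*-bin/* ${role}/
done
```
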
diff --git a/docker/README.md b/docker/README-standalone.md
similarity index 80%
copy from docker/README.md
copy to docker/README-standalone.md
index d137c8b..348a74e 100644
--- a/docker/README.md
+++ b/docker/README-standalone.md
@@ -1,21 +1,21 @@
+## Standalone/Self-contained Kylin deployment for learning
 
 In order to allow users to easily try Kylin, and to facilitate developers to verify and debug after modifying the source code. We provide the all-in-one Kylin docker image. In this image, each service that Kylin relies on is properly installed and deployed, including:
 
 - Jdk 1.8
 - Hadoop 2.7.0
 - Hive 1.2.1
-- Spark 2.4.6
-- Zookeeper 3.4.6
+- HBase 1.1.2 (with Zookeeper)
+- Spark 2.3.1
 - Kafka 1.1.1
 - MySQL 5.1.73
-- Maven 3.6.1
 
 ## Quickly try Kylin with pre-built images
 
 We have pushed the Kylin images to the [docker hub](https://hub.docker.com/r/apachekylin/apache-kylin-standalone). You do not need to build the image locally, just pull the image from remote (you can browse docker hub to check the available versions):
 
 ```
-docker pull apachekylin/apache-kylin-standalone:4.0.0-alpha
+docker pull apachekylin/apache-kylin-standalone:3.1.0
 ```
 
 After the pull is successful, execute "sh run_container.sh" or the following command to start the container:
@@ -28,14 +28,17 @@ docker run -d \
 -p 50070:50070 \
 -p 8032:8032 \
 -p 8042:8042 \
--p 2181:2181 \
-apachekylin/apache-kylin-standalone:4.0.0-alpha
+-p 16010:16010 \
+--name apache-kylin-standalone \
+apachekylin/apache-kylin-standalone:3.1.0
 ```
 
 The following services are automatically started when the container starts: 
 
 - NameNode, DataNode
 - ResourceManager, NodeManager
+- HBase
+- Kafka
 - Kylin
 
 and run automatically `$KYLIN_HOME/bin/sample.sh `, create a kylin_streaming_topic topic in Kafka and continue to send data to this topic. This is to let the users start the container and then experience the batch and streaming way to build the cube and query.
@@ -45,6 +48,7 @@ After the container is started, we can enter the container through the `docker e
 - Kylin Web UI: [http://127.0.0.1:7070/kylin/login](http://127.0.0.1:7070/kylin/login)
 - HDFS NameNode Web UI: [http://127.0.0.1:50070](http://127.0.0.1:50070/)
 - YARN ResourceManager Web UI: [http://127.0.0.1:8088](http://127.0.0.1:8088/)
+- HBase Web UI: [http://127.0.0.1:16010](http://127.0.0.1:16010/)
 
 In the container, the relevant environment variables are as follows: 
 
@@ -52,7 +56,8 @@ In the container, the relevant environment variables are as follows:
 JAVA_HOME=/home/admin/jdk1.8.0_141
 HADOOP_HOME=/home/admin/hadoop-2.7.0
 KAFKA_HOME=/home/admin/kafka_2.11-1.1.1
-SPARK_HOME=/home/admin/spark-2.4.6-bin-hadoop2.7
+SPARK_HOME=/home/admin/spark-2.3.1-bin-hadoop2.6
+HBASE_HOME=/home/admin/hbase-1.1.2
 HIVE_HOME=/home/admin/apache-hive-1.2.1-bin
 ```
 
@@ -60,15 +65,24 @@ After about 1 to 2 minutes, all the services should be started. At the Kylin log
 
 In the "Model" tab, you can click "Build" to build the two sample cubes. After the cubes be built, try some queries in the "Insight" tab.
 
-If you want to login into the Docker container, run "docker ps" to get the container id:
+If you want to log in to the Docker container, run "docker exec -it apache-kylin-standalone bash" to open a bash shell in it:
+
+```
+> docker exec -it apache-kylin-standalone bash
+[root@c15d10ff6bf1 admin]# ls
+apache-hive-1.2.1-bin                  apache-maven-3.6.1  first_run     hbase-1.1.2   kafka_2.11-1.1.1
+apache-kylin-3.0.0-alpha2-bin-hbase1x  entrypoint.sh       hadoop-2.7.0  jdk1.8.0_141  spark-2.3.1-bin-hadoop2.6
+```
+
+Or you can run "docker ps" to get the container id:
 
 ```
 > docker ps
 CONTAINER ID        IMAGE                                              COMMAND                  CREATED             STATUS              PORTS                                                                                                                                                NAMES
-c15d10ff6bf1        apachekylin/apache-kylin-standalone:3.0.1   "/home/admin/entrypo…"   55 minutes ago      Up 55 minutes       0.0.0.0:7070->7070/tcp, 0.0.0.0:8032->8032/tcp, 0.0.0.0:8042->8042/tcp, 0.0.0.0:8088->8088/tcp, 0.0.0.0:50070->50070/tcp, 0.0.0.0:16010->16010/tcp   romantic_moser
+c15d10ff6bf1        apachekylin/apache-kylin-standalone:3.1.0 "/home/admin/entrypo…"   55 minutes ago      Up 55 minutes       0.0.0.0:7070->7070/tcp, 0.0.0.0:8032->8032/tcp, 0.0.0.0:8042->8042/tcp, 0.0.0.0:8088->8088/tcp, 0.0.0.0:50070->50070/tcp, 0.0.0.0:16010->16010/tcp   romantic_moser
 ```
 
-Then run "docker -it <container id> bash" to login it with bash:
+Then run "docker exec -it <container id> bash" to login it with bash:
 
 ```
 > docker exec -it c15d10ff6bf1 bash
@@ -109,7 +123,7 @@ For example, if you made some code change in Kylin, you can make a new binary pa
 The new package is generated in "dist/" folder; Copy it to the "docker" folder:
 
 ```
-cp ./dist/apache-kylin-4.0.0-SNAPSHOT-bin.tar.gz ./docker
+cp ./dist/apache-kylin-3.1.0-SNAPSHOT-bin.tar.gz ./docker
 ```
 
 Use the "Dockerfile_dev" file to build:
diff --git a/docker/README.md b/docker/README.md
index d137c8b..124cceb 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -1,140 +1,7 @@
+## Kylin with docker
 
-In order to allow users to easily try Kylin, and to facilitate developers to verify and debug after modifying the source code. We provide the all-in-one Kylin docker image. In this image, each service that Kylin relies on is properly installed and deployed, including:
+Visit our official Docker repositories at <https://hub.docker.com/r/apachekylin>.
 
-- Jdk 1.8
-- Hadoop 2.7.0
-- Hive 1.2.1
-- Spark 2.4.6
-- Zookeeper 3.4.6
-- Kafka 1.1.1
-- MySQL 5.1.73
-- Maven 3.6.1
-
-## Quickly try Kylin with pre-built images
-
-We have pushed the Kylin images to the [docker hub](https://hub.docker.com/r/apachekylin/apache-kylin-standalone). You do not need to build the image locally, just pull the image from remote (you can browse docker hub to check the available versions):
-
-```
-docker pull apachekylin/apache-kylin-standalone:4.0.0-alpha
-```
-
-After the pull is successful, execute "sh run_container.sh" or the following command to start the container:
-
-```
-docker run -d \
--m 8G \
--p 7070:7070 \
--p 8088:8088 \
--p 50070:50070 \
--p 8032:8032 \
--p 8042:8042 \
--p 2181:2181 \
-apachekylin/apache-kylin-standalone:4.0.0-alpha
-```
-
-The following services are automatically started when the container starts: 
-
-- NameNode, DataNode
-- ResourceManager, NodeManager
-- Kylin
-
-and run automatically `$KYLIN_HOME/bin/sample.sh `, create a kylin_streaming_topic topic in Kafka and continue to send data to this topic. This is to let the users start the container and then experience the batch and streaming way to build the cube and query.
-
-After the container is started, we can enter the container through the `docker exec` command. Of course, since we have mapped the specified port in the container to the local port, we can open the pages of each service directly in the native browser, such as: 
-
-- Kylin Web UI: [http://127.0.0.1:7070/kylin/login](http://127.0.0.1:7070/kylin/login)
-- HDFS NameNode Web UI: [http://127.0.0.1:50070](http://127.0.0.1:50070/)
-- YARN ResourceManager Web UI: [http://127.0.0.1:8088](http://127.0.0.1:8088/)
-
-In the container, the relevant environment variables are as follows: 
-
-```
-JAVA_HOME=/home/admin/jdk1.8.0_141
-HADOOP_HOME=/home/admin/hadoop-2.7.0
-KAFKA_HOME=/home/admin/kafka_2.11-1.1.1
-SPARK_HOME=/home/admin/spark-2.4.6-bin-hadoop2.7
-HIVE_HOME=/home/admin/apache-hive-1.2.1-bin
-```
-
-After about 1 to 2 minutes, all the services should be started. At the Kylin login page (http://127.0.0.1:7070/kylin), enter ADMIN:KYLIN to login, select the "learn_kylin" project. In the "Model" tab, you should be able to see two sample cubes: "kylin_sales_cube" and "kylin_streaming_cube". If they don't appear, go to the "System" tab, and then click "Reload metadata", they should be loaded.
-
-In the "Model" tab, you can click "Build" to build the two sample cubes. After the cubes be built, try some queries in the "Insight" tab.
-
-If you want to login into the Docker container, run "docker ps" to get the container id:
-
-```
-> docker ps
-CONTAINER ID        IMAGE                                              COMMAND                  CREATED             STATUS              PORTS                                                                                                                                                NAMES
-c15d10ff6bf1        apachekylin/apache-kylin-standalone:3.0.1   "/home/admin/entrypo…"   55 minutes ago      Up 55 minutes       0.0.0.0:7070->7070/tcp, 0.0.0.0:8032->8032/tcp, 0.0.0.0:8042->8042/tcp, 0.0.0.0:8088->8088/tcp, 0.0.0.0:50070->50070/tcp, 0.0.0.0:16010->16010/tcp   romantic_moser
-```
-
-Then run "docker -it <container id> bash" to login it with bash:
-
-```
-> docker exec -it c15d10ff6bf1 bash
-[root@c15d10ff6bf1 admin]# ls
-apache-hive-1.2.1-bin                  apache-maven-3.6.1  first_run     hbase-1.1.2   kafka_2.11-1.1.1
-apache-kylin-3.0.0-alpha2-bin-hbase1x  entrypoint.sh       hadoop-2.7.0  jdk1.8.0_141  spark-2.3.1-bin-hadoop2.6
-```
-
-## Build Docker image in local
-
-You can build the docker image by yourself with the provided Dockerfile. Here we separate the scripts into several files:
-
-- Dockerfile_hadoop: build a Hadoop image with Hadoop/Hive/HBase/Spark/Kafka and other components installed;
-- Dockerfile: based on the Hadoop image, download Kylin from apache website and then start all services.
-- Dockerfile_dev: similar with "Dockerfile", instead of downloading the released version, it copies local built Kylin package to the image.
-
-Others:
-- conf/: the Hadoop/HBase/Hive/Maven configuration files for this docker; Will copy them into the image on 'docker build';
-- entrypoint.sh: the entrypoint script, which will start all the services;
-
-The build is very simple:
-
-```
-./build_image.sh
-```
-The script will build the Hadoop image first, and then build Kylin image based on it. Depends on the network bandwidth, the first time may take a while.
-
-## Customize the Docker image
-
-You can customize these scripts and Dockerfile to make your image.
-
-For example, if you made some code change in Kylin, you can make a new binary package in local with:
-
-```
-./build/scripts/package.sh
-```
-
-The new package is generated in "dist/" folder; Copy it to the "docker" folder:
-
-```
-cp ./dist/apache-kylin-4.0.0-SNAPSHOT-bin.tar.gz ./docker
-```
-
-Use the "Dockerfile_dev" file to build:
-
-```
-docker build -f Dockerfile_dev -t apache-kylin-standalone:test .
-
-```
-
-## Build Docker image for your Hadoop environment
-
-You can run Kylin in Docker with your Hadoop cluster. In this case, you need to build a customized image:
-
-- Use the same version Hadoop components as your cluster;
-- Use your cluster's configuration files (copy to conf/);
-- Modify the "entrypoint.sh", only start Kylin, no need to start other Hadoop services;
-
-
-## Container resource recommendation
-
-In order to allow Kylin to build the cube smoothly, the memory resource we configured for Yarn NodeManager is 6G, plus the memory occupied by each service, please ensure that the memory of the container is not less than 8G, so as to avoid errors due to insufficient memory.
-
-For the resource setting method for the container, please refer to:
-
-- Mac user: <https://docs.docker.com/docker-for-mac/#advanced>
-- Linux user: <https://docs.docker.com/config/containers/resource_constraints/#memory>
-
----
+- For a quick start or a learning environment, use the [standalone container deployment](./README-standalone.md).
+- For CI/CD and system testing, use the [docker-compose deployment](./README-cluster.md).
+- For production deployment, use [Kylin on Kubernetes](../kubernetes).
diff --git a/docker/build_cluster_images.sh b/docker/build_cluster_images.sh
index b2aae80..d774434 100644
--- a/docker/build_cluster_images.sh
+++ b/docker/build_cluster_images.sh
@@ -21,44 +21,38 @@ WS_ROOT=`dirname $SCRIPT_PATH`
 
 source ${SCRIPT_PATH}/header.sh
 
-#docker build -t apachekylin/kylin-metastore:mysql_5.6.49 ./kylin/metastore-db
-#
-
-docker build -t apachekylin/kylin-hadoop-base:hadoop_${HADOOP_VERSION} --build-arg HADOOP_VERSION=${HADOOP_VERSION} ./dockerfile/cluster/base
-docker build -t apachekylin/kylin-hadoop-namenode:hadoop_${HADOOP_VERSION} --build-arg HADOOP_VERSION=${HADOOP_VERSION} --build-arg HADOOP_WEBHDFS_PORT=${HADOOP_WEBHDFS_PORT} ./dockerfile/cluster/namenode
-docker build -t apachekylin/kylin-hadoop-datanode:hadoop_${HADOOP_VERSION} --build-arg HADOOP_VERSION=${HADOOP_VERSION} --build-arg HADOOP_DN_PORT=${HADOOP_DN_PORT} ./dockerfile/cluster/datanode
-docker build -t apachekylin/kylin-hadoop-resourcemanager:hadoop_${HADOOP_VERSION} --build-arg HADOOP_VERSION=${HADOOP_VERSION} ./dockerfile/cluster/resourcemanager
-docker build -t apachekylin/kylin-hadoop-nodemanager:hadoop_${HADOOP_VERSION} --build-arg HADOOP_VERSION=${HADOOP_VERSION} ./dockerfile/cluster/nodemanager
-docker build -t apachekylin/kylin-hadoop-historyserver:hadoop_${HADOOP_VERSION} --build-arg HADOOP_VERSION=${HADOOP_VERSION} ./dockerfile/cluster/historyserver
-
-docker build -t apachekylin/kylin-hive:hive_${HIVE_VERSION}_hadoop_${HADOOP_VERSION} \
---build-arg HIVE_VERSION=${HIVE_VERSION} \
---build-arg HADOOP_VERSION=${HADOOP_VERSION} \
-./dockerfile/cluster/hive
+docker build -t apachekylin/kylin-ci-hadoop-base:hadoop_${HADOOP_VERSION} --build-arg HADOOP_VERSION=${HADOOP_VERSION} ./dockerfile/cluster/base
+docker build -t apachekylin/kylin-ci-hadoop-namenode:hadoop_${HADOOP_VERSION} --build-arg HADOOP_VERSION=${HADOOP_VERSION} --build-arg HADOOP_WEBHDFS_PORT=${HADOOP_WEBHDFS_PORT} ./dockerfile/cluster/namenode
+docker build -t apachekylin/kylin-ci-hadoop-datanode:hadoop_${HADOOP_VERSION} --build-arg HADOOP_VERSION=${HADOOP_VERSION} --build-arg HADOOP_DN_PORT=${HADOOP_DN_PORT} ./dockerfile/cluster/datanode
+docker build -t apachekylin/kylin-ci-hadoop-resourcemanager:hadoop_${HADOOP_VERSION} --build-arg HADOOP_VERSION=${HADOOP_VERSION} ./dockerfile/cluster/resourcemanager
+docker build -t apachekylin/kylin-ci-hadoop-nodemanager:hadoop_${HADOOP_VERSION} --build-arg HADOOP_VERSION=${HADOOP_VERSION} ./dockerfile/cluster/nodemanager
+docker build -t apachekylin/kylin-ci-hadoop-historyserver:hadoop_${HADOOP_VERSION} --build-arg HADOOP_VERSION=${HADOOP_VERSION} ./dockerfile/cluster/historyserver
+
+docker build -t apachekylin/kylin-ci-hive:hive_${HIVE_VERSION}_hadoop_${HADOOP_VERSION} \
+  --build-arg HIVE_VERSION=${HIVE_VERSION} \
+  --build-arg HADOOP_VERSION=${HADOOP_VERSION} \
+  ./dockerfile/cluster/hive
 
 if [ $ENABLE_HBASE == "yes" ]; then
-  docker build -t apachekylin/kylin-hbase-base:hbase_${HBASE_VERSION} --build-arg HBASE_VERSION=${HBASE_VERSION} ./dockerfile/cluster/hbase
-  docker build -t apachekylin/kylin-hbase-master:hbase_${HBASE_VERSION} --build-arg HBASE_VERSION=${HBASE_VERSION} ./dockerfile/cluster/hmaster
-  docker build -t apachekylin/kylin-hbase-regionserver:hbase_${HBASE_VERSION} --build-arg HBASE_VERSION=${HBASE_VERSION} ./dockerfile/cluster/hregionserver
+  docker build -t apachekylin/kylin-ci-hbase-base:hbase_${HBASE_VERSION} --build-arg HBASE_VERSION=${HBASE_VERSION} ./dockerfile/cluster/hbase
+  docker build -t apachekylin/kylin-ci-hbase-master:hbase_${HBASE_VERSION} --build-arg HBASE_VERSION=${HBASE_VERSION} ./dockerfile/cluster/hmaster
+  docker build -t apachekylin/kylin-ci-hbase-regionserver:hbase_${HBASE_VERSION} --build-arg HBASE_VERSION=${HBASE_VERSION} ./dockerfile/cluster/hregionserver
 fi
 
 if [ $ENABLE_KERBEROS == "yes" ]; then
-  docker build -t apachekylin/kylin-kerberos:latest ./dockerfile/cluster/kerberos
+  docker build -t apachekylin/kylin-ci-kerberos:latest ./dockerfile/cluster/kerberos
 fi
 
 if [ $ENABLE_LDAP == "yes" ]; then
   docker pull osixia/openldap:1.3.0
 fi
 
-#if [ $ENABLE_KAFKA == "yes" ]; then
-#  docker pull bitnami/kafka:2.0.0
-#fi
-docker pull bitnami/kafka:2.0.0
-
-docker pull mysql:5.6.49
+if [ $ENABLE_KAFKA == "yes" ]; then
+  docker pull bitnami/kafka:2.0.0
+fi
 
-docker build -t apachekylin/kylin-client:hadoop_${HADOOP_VERSION}_hive_${HIVE_VERSION}_hbase_${HBASE_VERSION} \
---build-arg HIVE_VERSION=${HIVE_VERSION} \
---build-arg HADOOP_VERSION=${HADOOP_VERSION} \
---build-arg HBASE_VERSION=${HBASE_VERSION} \
-./dockerfile/cluster/client
+docker build -t apachekylin/kylin-ci-client:hadoop_${HADOOP_VERSION}_hive_${HIVE_VERSION}_hbase_${HBASE_VERSION} \
+  --build-arg HIVE_VERSION=${HIVE_VERSION} \
+  --build-arg HADOOP_VERSION=${HADOOP_VERSION} \
+  --build-arg HBASE_VERSION=${HBASE_VERSION} \
+  ./dockerfile/cluster/client
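
The *_VERSION variables come from header.sh, sourced above (and are presumably driven by the setup_cluster.sh arguments shown in README-cluster.md). A sketch of building a single image by hand, using the Hadoop 3.1.4 / Hive 2.3.7 combination from the support matrix:

```shell
HADOOP_VERSION=3.1.4
HIVE_VERSION=2.3.7
docker build -t apachekylin/kylin-ci-hive:hive_${HIVE_VERSION}_hadoop_${HADOOP_VERSION} \
  --build-arg HIVE_VERSION=${HIVE_VERSION} \
  --build-arg HADOOP_VERSION=${HADOOP_VERSION} \
  ./dockerfile/cluster/hive
```
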
diff --git a/docker/docker-compose/others/client-write-read.env b/docker/docker-compose/others/client-write-read.env
index c61e986..1a9ecad 100644
--- a/docker/docker-compose/others/client-write-read.env
+++ b/docker/docker-compose/others/client-write-read.env
@@ -26,8 +26,8 @@ YARN_CONF_yarn_timeline___service_generic___application___history_enabled=true
 YARN_CONF_yarn_timeline___service_hostname=write-historyserver
 YARN_CONF_mapreduce_map_output_compress=true
 YARN_CONF_mapred_map_output_compress_codec=org.apache.hadoop.io.compress.SnappyCodec
-YARN_CONF_yarn_nodemanager_resource_memory___mb=16384
-YARN_CONF_yarn_nodemanager_resource_cpu___vcores=8
+YARN_CONF_yarn_nodemanager_resource_memory___mb=10240
+YARN_CONF_yarn_nodemanager_resource_cpu___vcores=6
 YARN_CONF_yarn_nodemanager_disk___health___checker_max___disk___utilization___per___disk___percentage=98.5
 YARN_CONF_yarn_nodemanager_remote___app___log___dir=/app-logs
 YARN_CONF_yarn_nodemanager_aux___services=mapreduce_shuffle
diff --git a/docker/docker-compose/others/client-write.env b/docker/docker-compose/others/client-write.env
index edad60b..d47815c 100644
--- a/docker/docker-compose/others/client-write.env
+++ b/docker/docker-compose/others/client-write.env
@@ -26,8 +26,8 @@ YARN_CONF_yarn_timeline___service_generic___application___history_enabled=true
 YARN_CONF_yarn_timeline___service_hostname=write-historyserver
 YARN_CONF_mapreduce_map_output_compress=true
 YARN_CONF_mapred_map_output_compress_codec=org.apache.hadoop.io.compress.SnappyCodec
-YARN_CONF_yarn_nodemanager_resource_memory___mb=16384
-YARN_CONF_yarn_nodemanager_resource_cpu___vcores=8
+YARN_CONF_yarn_nodemanager_resource_memory___mb=10240
+YARN_CONF_yarn_nodemanager_resource_cpu___vcores=6
 YARN_CONF_yarn_nodemanager_disk___health___checker_max___disk___utilization___per___disk___percentage=98.5
 YARN_CONF_yarn_nodemanager_remote___app___log___dir=/app-logs
 YARN_CONF_yarn_nodemanager_aux___services=mapreduce_shuffle
diff --git a/docker/docker-compose/others/docker-compose-kylin-write-read.yml b/docker/docker-compose/others/docker-compose-kylin-write-read.yml
index cb67b06..0804db0 100644
--- a/docker/docker-compose/others/docker-compose-kylin-write-read.yml
+++ b/docker/docker-compose/others/docker-compose-kylin-write-read.yml
@@ -6,17 +6,11 @@ services:
     container_name: kylin-all
     hostname: kylin-all
     volumes:
-      - ./conf/hadoop:/etc/hadoop/conf
-      - ./conf/hbase:/etc/hbase/conf
-      - ./conf/hive:/etc/hive/conf
-      - ./kylin/kylin-all:/opt/kylin/kylin-all
+      - ./kylin/kylin-all:/opt/kylin/
     env_file:
       - client-write-read.env
     environment:
-      HADOOP_CONF_DIR: /etc/hadoop/conf
-      HIVE_CONF_DIR: /etc/hive/conf
-      HBASE_CONF_DIR: /etc/hbase/conf
-      KYLIN_HOME: /opt/kylin/kylin-all
+      KYLIN_HOME: /opt/kylin/
     networks:
       - write_kylin
     ports:
@@ -27,17 +21,11 @@ services:
     container_name: kylin-job
     hostname: kylin-job
     volumes:
-      - ./conf/hadoop:/etc/hadoop/conf
-      - ./conf/hbase:/etc/hbase/conf
-      - ./conf/hive:/etc/hive/conf
-      - ./kylin/kylin-job:/opt/kylin/kylin-job
+      - ./kylin/kylin-job:/opt/kylin/
     env_file:
       - client-write-read.env
     environment:
-      HADOOP_CONF_DIR: /etc/hadoop/conf
-      HIVE_CONF_DIR: /etc/hive/conf
-      HBASE_CONF_DIR: /etc/hbase/conf
-      KYLIN_HOME: /opt/kylin/kylin-job
+      KYLIN_HOME: /opt/kylin/
     networks:
       - write_kylin
     ports:
@@ -48,17 +36,11 @@ services:
     container_name: kylin-query
     hostname: kylin-query
     volumes:
-      - ./conf/hadoop:/etc/hadoop/conf
-      - ./conf/hbase:/etc/hbase/conf
-      - ./conf/hive:/etc/hive/conf
-      - ./kylin/kylin-query:/opt/kylin/kylin-query
+      - ./kylin/kylin-query:/opt/kylin/
     env_file:
       - client-write-read.env
     environment:
-      HADOOP_CONF_DIR: /etc/hadoop/conf
-      HIVE_CONF_DIR: /etc/hive/conf
-      HBASE_CONF_DIR: /etc/hbase/conf
-      KYLIN_HOME: /opt/kylin/kylin-query
+      KYLIN_HOME: /opt/kylin/
     networks:
       - write_kylin
     ports:
diff --git a/docker/docker-compose/others/docker-compose-kylin-write.yml b/docker/docker-compose/others/docker-compose-kylin-write.yml
index a78b88a..e0c1c81 100644
--- a/docker/docker-compose/others/docker-compose-kylin-write.yml
+++ b/docker/docker-compose/others/docker-compose-kylin-write.yml
@@ -3,20 +3,16 @@ version: "3.3"
 services:
   kylin-all:
     image: ${CLIENT_IMAGETAG}
+    labels:
+      org.apache.kylin.description: "This is the All role in Kylin."
     container_name: kylin-all
     hostname: kylin-all
     volumes:
-      - ./conf/hadoop:/etc/hadoop/conf
-      - ./conf/hbase:/etc/hbase/conf
-      - ./conf/hive:/etc/hive/conf
-      - ./kylin/kylin-all:/opt/kylin/kylin-all
+      - ./kylin/kylin-all:/opt/kylin
     env_file:
       - client-write.env
     environment:
-      HADOOP_CONF_DIR: /etc/hadoop/conf
-      HIVE_CONF_DIR: /etc/hive/conf
-      HBASE_CONF_DIR: /etc/hbase/conf
-      KYLIN_HOME: /opt/kylin/kylin-all
+      KYLIN_HOME: /opt/kylin/
     networks:
       - write_kylin
     ports:
@@ -24,20 +20,16 @@ services:
 
   kylin-job:
     image: ${CLIENT_IMAGETAG}
+    labels:
+      org.apache.kylin.description: "This is the Job role in Kylin."
     container_name: kylin-job
     hostname: kylin-job
     volumes:
-      - ./conf/hadoop:/etc/hadoop/conf
-      - ./conf/hbase:/etc/hbase/conf
-      - ./conf/hive:/etc/hive/conf
-      - ./kylin/kylin-job:/opt/kylin/kylin-job
+      - ./kylin/kylin-job:/opt/kylin/
     env_file:
       - client-write.env
     environment:
-      HADOOP_CONF_DIR: /etc/hadoop/conf
-      HIVE_CONF_DIR: /etc/hive/conf
-      HBASE_CONF_DIR: /etc/hbase/conf
-      KYLIN_HOME: /opt/kylin/kylin-job
+      KYLIN_HOME: /opt/kylin/
     networks:
       - write_kylin
     ports:
@@ -45,20 +37,16 @@ services:
 
   kylin-query:
     image: ${CLIENT_IMAGETAG}
+    labels:
+      org.apache.kylin.description: "This is the Query role in Kylin."
     container_name: kylin-query
     hostname: kylin-query
     volumes:
-      - ./conf/hadoop:/etc/hadoop/conf
-      - ./conf/hbase:/etc/hbase/conf
-      - ./conf/hive:/etc/hive/conf
-      - ./kylin/kylin-query:/opt/kylin/kylin-query
+      - ./kylin/kylin-query:/opt/kylin/
     env_file:
       - client-write.env
     environment:
-      HADOOP_CONF_DIR: /etc/hadoop/conf
-      HIVE_CONF_DIR: /etc/hive/conf
-      HBASE_CONF_DIR: /etc/hbase/conf
-      KYLIN_HOME: /opt/kylin/kylin-query
+      KYLIN_HOME: /opt/kylin/
     networks:
       - write_kylin
     ports:
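
With this change the Kylin containers no longer mount conf directories or set HADOOP_CONF_DIR / HIVE_CONF_DIR / HBASE_CONF_DIR explicitly; they rely on the configuration baked into the client image (the base Dockerfile below sets HADOOP_CONF_DIR=/etc/hadoop/conf). A quick sanity check, assuming the client image inherits that setting:

```shell
docker exec -it kylin-all bash -c 'echo $HADOOP_CONF_DIR && ls $HADOOP_CONF_DIR | head'
```
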
diff --git a/docker/docker-compose/others/docker-compose-metastore.yml b/docker/docker-compose/others/docker-compose-metastore.yml
index a36df07..e237f33 100644
--- a/docker/docker-compose/others/docker-compose-metastore.yml
+++ b/docker/docker-compose/others/docker-compose-metastore.yml
@@ -2,8 +2,6 @@ version: "3.3"
 
 services:
   metastore-db:
-#    image: mysql:5.6.49
-#    image: mysql:8.0.11
     image: mysql:5.7.24
     container_name: metastore-db
     hostname: metastore-db
diff --git a/docker/docker-compose/others/kylin/README.md b/docker/docker-compose/others/kylin/README.md
new file mode 100644
index 0000000..fc03d17
--- /dev/null
+++ b/docker/docker-compose/others/kylin/README.md
@@ -0,0 +1,2 @@
+
+Please put the Kylin binary package here; the docker-compose files expect kylin-all, kylin-job and kylin-query directories under this folder.
\ No newline at end of file
diff --git a/docker/docker-compose/read/docker-compose-hadoop.yml b/docker/docker-compose/read/docker-compose-hadoop.yml
index a0e2a66..69888c2 100644
--- a/docker/docker-compose/read/docker-compose-hadoop.yml
+++ b/docker/docker-compose/read/docker-compose-hadoop.yml
@@ -2,7 +2,7 @@ version: "3.3"
 
 services:
   read-namenode:
-    image: ${HADOOP_NAMENODE_IMAGETAG:-apachekylin/kylin-hadoop-namenode:hadoop_2.8.5}
+    image: ${HADOOP_NAMENODE_IMAGETAG:-apachekylin/kylin-ci-hadoop-namenode:hadoop_2.8.5}
     container_name: read-namenode
     hostname: read-namenode
     volumes:
@@ -21,7 +21,7 @@ services:
       - 9871:9870
 
   read-datanode1:
-    image: ${HADOOP_DATANODE_IMAGETAG:-apachekylin/kylin-hadoop-datanode:hadoop_2.8.5}
+    image: ${HADOOP_DATANODE_IMAGETAG:-apachekylin/kylin-ci-hadoop-datanode:hadoop_2.8.5}
     container_name: read-datanode1
     hostname: read-datanode1
     volumes:
@@ -39,7 +39,7 @@ services:
       - ${HADOOP_DN_PORT:-50075}
 
   read-datanode2:
-    image: ${HADOOP_DATANODE_IMAGETAG:-apachekylin/kylin-hadoop-datanode:hadoop_2.8.5}
+    image: ${HADOOP_DATANODE_IMAGETAG:-apachekylin/kylin-ci-hadoop-datanode:hadoop_2.8.5}
     container_name: read-datanode2
     hostname: read-datanode2
     volumes:
@@ -55,7 +55,7 @@ services:
       - ${HADOOP_DN_PORT:-50075}
 
   read-datanode3:
-    image: ${HADOOP_DATANODE_IMAGETAG:-apachekylin/kylin-hadoop-datanode:hadoop_2.8.5}
+    image: ${HADOOP_DATANODE_IMAGETAG:-apachekylin/kylin-ci-hadoop-datanode:hadoop_2.8.5}
     container_name: read-datanode3
     hostname: read-datanode3
     volumes:
@@ -71,7 +71,7 @@ services:
       - ${HADOOP_DN_PORT:-50075}
 
   read-resourcemanager:
-    image: ${HADOOP_RESOURCEMANAGER_IMAGETAG:-apachekylin/kylin-hadoop-resourcemanager:hadoop_2.8.5}
+    image: ${HADOOP_RESOURCEMANAGER_IMAGETAG:-apachekylin/kylin-ci-hadoop-resourcemanager:hadoop_2.8.5}
     container_name: read-resourcemanager
     hostname: read-resourcemanager
     environment:
@@ -85,7 +85,7 @@ services:
       - 8089:8088
 
   read-nodemanager1:
-    image: ${HADOOP_NODEMANAGER_IMAGETAG:-apachekylin/kylin-hadoop-nodemanager:hadoop_2.8.5}
+    image: ${HADOOP_NODEMANAGER_IMAGETAG:-apachekylin/kylin-ci-hadoop-nodemanager:hadoop_2.8.5}
     container_name: read-nodemanager1
     hostname: read-nodemanager1
     environment:
@@ -97,7 +97,7 @@ services:
       - write_kylin
 
   read-nodemanager2:
-    image: ${HADOOP_NODEMANAGER_IMAGETAG:-apachekylin/kylin-hadoop-nodemanager:hadoop_2.8.5}
+    image: ${HADOOP_NODEMANAGER_IMAGETAG:-apachekylin/kylin-ci-hadoop-nodemanager:hadoop_2.8.5}
     container_name: read-nodemanager2
     hostname: read-nodemanager2
     environment:
@@ -109,7 +109,7 @@ services:
       - write_kylin
 
   read-historyserver:
-    image: ${HADOOP_HISTORYSERVER_IMAGETAG:-apachekylin/kylin-hadoop-historyserver:hadoop_2.8.5}
+    image: ${HADOOP_HISTORYSERVER_IMAGETAG:-apachekylin/kylin-ci-hadoop-historyserver:hadoop_2.8.5}
     container_name: read-historyserver
     hostname: read-historyserver
     volumes:
diff --git a/docker/docker-compose/read/docker-compose-hbase.yml b/docker/docker-compose/read/docker-compose-hbase.yml
index ac4048b..5f158cb 100644
--- a/docker/docker-compose/read/docker-compose-hbase.yml
+++ b/docker/docker-compose/read/docker-compose-hbase.yml
@@ -2,7 +2,7 @@ version: "3.3"
 
 services:
   read-hbase-master:
-    image: ${HBASE_MASTER_IMAGETAG:-apachekylin/kylin-hbase-master:hbase1.1.2}
+    image: ${HBASE_MASTER_IMAGETAG:-apachekylin/kylin-ci-hbase-master:hbase1.1.2}
     container_name: read-hbase-master
     hostname: read-hbase-master
     env_file:
@@ -15,7 +15,7 @@ services:
       - 16010:16010
 
   read-hbase-regionserver1:
-    image: ${HBASE_REGIONSERVER_IMAGETAG:-apachekylin/kylin-hbase-regionserver:hbase_1.1.2}
+    image: ${HBASE_REGIONSERVER_IMAGETAG:-apachekylin/kylin-ci-hbase-regionserver:hbase_1.1.2}
     container_name: read-hbase-regionserver1
     hostname: read-hbase-regionserver1
     env_file:
@@ -27,7 +27,7 @@ services:
       - write_kylin
 
   read-hbase-regionserver2:
-    image: ${HBASE_REGIONSERVER_IMAGETAG:-apachekylin/kylin-hbase-regionserver:hbase_1.1.2}
+    image: ${HBASE_REGIONSERVER_IMAGETAG:-apachekylin/kylin-ci-hbase-regionserver:hbase_1.1.2}
     container_name: read-hbase-regionserver2
     hostname: read-hbase-regionserver2
     env_file:
diff --git a/docker/docker-compose/read/read-hadoop.env b/docker/docker-compose/read/read-hadoop.env
index 9c0086d..5290caa 100644
--- a/docker/docker-compose/read/read-hadoop.env
+++ b/docker/docker-compose/read/read-hadoop.env
@@ -26,8 +26,8 @@ YARN_CONF_yarn_timeline___service_generic___application___history_enabled=true
 YARN_CONF_yarn_timeline___service_hostname=read-historyserver
 YARN_CONF_mapreduce_map_output_compress=true
 YARN_CONF_mapred_map_output_compress_codec=org.apache.hadoop.io.compress.SnappyCodec
-YARN_CONF_yarn_nodemanager_resource_memory___mb=16384
-YARN_CONF_yarn_nodemanager_resource_cpu___vcores=8
+YARN_CONF_yarn_nodemanager_resource_memory___mb=10240
+YARN_CONF_yarn_nodemanager_resource_cpu___vcores=5
 YARN_CONF_yarn_nodemanager_disk___health___checker_max___disk___utilization___per___disk___percentage=98.5
 YARN_CONF_yarn_nodemanager_remote___app___log___dir=/app-logs
 YARN_CONF_yarn_nodemanager_aux___services=mapreduce_shuffle
diff --git a/docker/docker-compose/write/conf/hive/hive-site.xml b/docker/docker-compose/write/conf/hive/hive-site.xml
index c60fe36..ab7779b 100644
--- a/docker/docker-compose/write/conf/hive/hive-site.xml
+++ b/docker/docker-compose/write/conf/hive/hive-site.xml
@@ -1,5 +1,6 @@
 <?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?><!--
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
    this work for additional information regarding copyright ownership.
@@ -14,11 +15,12 @@
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
---><configuration>
+-->
+<configuration>
     <property><name>hive.metastore.uris</name><value>thrift://write-hive-metastore:9083</value></property>
     <property><name>datanucleus.autoCreateSchema</name><value>false</value></property>
-    <property><name>javax.jdo.option.ConnectionURL</name><value>jdbc:postgresql://write-hive-metastore-postgresql/metastore</value></property>
-    <property><name>javax.jdo.option.ConnectionDriverName</name><value>org.postgresql.Driver</value></property>
+    <property><name>javax.jdo.option.ConnectionURL</name><value>jdbc:mysql://metastore-db/metastore?useSSL=false&amp;allowPublicKeyRetrieval=true</value></property>
+    <property><name>javax.jdo.option.ConnectionDriverName</name><value>com.mysql.cj.jdbc.Driver</value></property>
     <property><name>javax.jdo.option.ConnectionPassword</name><value>hive</value></property>
     <property><name>javax.jdo.option.ConnectionUserName</name><value>hive</value></property>
 </configuration>
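
With the Hive metastore now backed by the MySQL metastore-db service, a quick way to confirm connectivity is to run a query through the write-hive-server container (assuming beeline is on the PATH inside the Hive image):

```shell
docker exec -it write-hive-server beeline -u jdbc:hive2://localhost:10000 -e 'show databases;'
```
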
diff --git a/docker/docker-compose/write/docker-compose-hadoop.yml b/docker/docker-compose/write/docker-compose-hadoop.yml
index 4286cfc..8c75f37 100644
--- a/docker/docker-compose/write/docker-compose-hadoop.yml
+++ b/docker/docker-compose/write/docker-compose-hadoop.yml
@@ -2,7 +2,7 @@ version: "3.3"
 
 services:
   write-namenode:
-    image: ${HADOOP_NAMENODE_IMAGETAG:-apachekylin/kylin-hadoop-namenode:hadoop_2.8.5}
+    image: ${HADOOP_NAMENODE_IMAGETAG:-apachekylin/kylin-ci-hadoop-namenode:hadoop_2.8.5}
     container_name: write-namenode
     hostname: write-namenode
     volumes:
@@ -21,7 +21,7 @@ services:
       - 9870:9870
 
   write-datanode1:
-    image: ${HADOOP_DATANODE_IMAGETAG:-apachekylin/kylin-hadoop-datanode:hadoop_2.8.5}
+    image: ${HADOOP_DATANODE_IMAGETAG:-apachekylin/kylin-ci-hadoop-datanode:hadoop_2.8.5}
     container_name: write-datanode1
     hostname: write-datanode1
     volumes:
@@ -33,13 +33,11 @@ services:
       - write-hadoop.env
     networks:
       - kylin
-    links:
-      - write-namenode
     expose:
       - ${HADOOP_DN_PORT:-50075}
 
   write-datanode2:
-    image: ${HADOOP_DATANODE_IMAGETAG:-apachekylin/kylin-hadoop-datanode:hadoop_2.8.5}
+    image: ${HADOOP_DATANODE_IMAGETAG:-apachekylin/kylin-ci-hadoop-datanode:hadoop_2.8.5}
     container_name: write-datanode2
     hostname: write-datanode2
     volumes:
@@ -55,7 +53,7 @@ services:
       - ${HADOOP_DN_PORT:-50075}
 
   write-datanode3:
-    image: ${HADOOP_DATANODE_IMAGETAG:-apachekylin/kylin-hadoop-datanode:hadoop_2.8.5}
+    image: ${HADOOP_DATANODE_IMAGETAG:-apachekylin/kylin-ci-hadoop-datanode:hadoop_2.8.5}
     container_name: write-datanode3
     hostname: write-datanode3
     volumes:
@@ -71,7 +69,7 @@ services:
       - ${HADOOP_DN_PORT:-50075}
 
   write-resourcemanager:
-    image: ${HADOOP_RESOURCEMANAGER_IMAGETAG:-apachekylin/kylin-hadoop-resourcemanager:hadoop_2.8.5}
+    image: ${HADOOP_RESOURCEMANAGER_IMAGETAG:-apachekylin/kylin-ci-hadoop-resourcemanager:hadoop_2.8.5}
     container_name: write-resourcemanager
     hostname: write-resourcemanager
     environment:
@@ -85,7 +83,7 @@ services:
       - 8088:8088
 
   write-nodemanager1:
-    image: ${HADOOP_NODEMANAGER_IMAGETAG:-apachekylin/kylin-hadoop-nodemanager:hadoop_2.8.5}
+    image: ${HADOOP_NODEMANAGER_IMAGETAG:-apachekylin/kylin-ci-hadoop-nodemanager:hadoop_2.8.5}
     container_name: write-nodemanager1
     hostname: write-nodemanager1
     environment:
@@ -95,9 +93,11 @@ services:
       - write-hadoop.env
     networks:
       - kylin
+    ports:
+      - 8044:8042
 
   write-nodemanager2:
-    image: ${HADOOP_NODEMANAGER_IMAGETAG:-apachekylin/kylin-hadoop-nodemanager:hadoop_2.8.5}
+    image: ${HADOOP_NODEMANAGER_IMAGETAG:-apachekylin/kylin-ci-hadoop-nodemanager:hadoop_2.8.5}
     container_name: write-nodemanager2
     hostname: write-nodemanager2
     environment:
@@ -107,9 +107,11 @@ services:
       - write-hadoop.env
     networks:
       - kylin
+    ports:
+      - 8043:8042
 
   write-historyserver:
-    image: ${HADOOP_HISTORYSERVER_IMAGETAG:-apachekylin/kylin-hadoop-historyserver:hadoop_2.8.5}
+    image: ${HADOOP_HISTORYSERVER_IMAGETAG:-apachekylin/kylin-ci-hadoop-historyserver:hadoop_2.8.5}
     container_name: write-historyserver
     hostname: write-historyserver
     volumes:
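
The two NodeManager web UIs are now published on host ports 8043 and 8044 (both map to container port 8042), so they can be checked from the docker host:

```shell
curl -s http://localhost:8043/node | head
curl -s http://localhost:8044/node | head
```
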
diff --git a/docker/docker-compose/write/docker-compose-hbase.yml b/docker/docker-compose/write/docker-compose-hbase.yml
index d95f32b..5539f9d 100644
--- a/docker/docker-compose/write/docker-compose-hbase.yml
+++ b/docker/docker-compose/write/docker-compose-hbase.yml
@@ -2,7 +2,7 @@ version: "3.3"
 
 services:
   write-hbase-master:
-    image: ${HBASE_MASTER_IMAGETAG:-apachekylin/kylin-hbase-master:hbase1.1.2}
+    image: ${HBASE_MASTER_IMAGETAG:-apachekylin/kylin-ci-hbase-master:hbase1.1.2}
     container_name: write-hbase-master
     hostname: write-hbase-master
     env_file:
@@ -15,7 +15,7 @@ services:
       - 16010:16010
 
   write-hbase-regionserver1:
-    image: ${HBASE_REGIONSERVER_IMAGETAG:-apachekylin/kylin-hbase-regionserver:hbase_1.1.2}
+    image: ${HBASE_REGIONSERVER_IMAGETAG:-apachekylin/kylin-ci-hbase-regionserver:hbase_1.1.2}
     container_name: write-hbase-regionserver1
     hostname: write-hbase-regionserver1
     env_file:
@@ -27,7 +27,7 @@ services:
       - write_kylin
 
   write-hbase-regionserver2:
-    image: ${HBASE_REGIONSERVER_IMAGETAG:-apachekylin/kylin-hbase-regionserver:hbase_1.1.2}
+    image: ${HBASE_REGIONSERVER_IMAGETAG:-apachekylin/kylin-ci-hbase-regionserver:hbase_1.1.2}
     container_name: write-hbase-regionserver2
     hostname: write-hbase-regionserver2
     env_file:
diff --git a/docker/docker-compose/write/docker-compose-hive.yml b/docker/docker-compose/write/docker-compose-hive.yml
index 9b94a34..54459ff 100644
--- a/docker/docker-compose/write/docker-compose-hive.yml
+++ b/docker/docker-compose/write/docker-compose-hive.yml
@@ -2,7 +2,7 @@ version: "3.3"
 
 services:
   write-hive-server:
-    image: ${HIVE_IMAGETAG:-apachekylin/kylin-hive:hive_1.2.2_hadoop_2.8.5}
+    image: ${HIVE_IMAGETAG:-apachekylin/kylin-ci-hive:hive_1.2.2_hadoop_2.8.5}
     container_name: write-hive-server
     hostname: write-hive-server
     env_file:
@@ -17,7 +17,7 @@ services:
       - 10000:10000
 
   write-hive-metastore:
-    image: ${HIVE_IMAGETAG:-apachekylin/kylin-hive:hive_1.2.2_hadoop_2.8.5}
+    image: ${HIVE_IMAGETAG:-apachekylin/kylin-ci-hive:hive_1.2.2_hadoop_2.8.5}
     container_name: write-hive-metastore
     hostname: write-hive-metastore
     env_file:
diff --git a/docker/docker-compose/write/write-hadoop.env b/docker/docker-compose/write/write-hadoop.env
index ef4429a..670756f 100644
--- a/docker/docker-compose/write/write-hadoop.env
+++ b/docker/docker-compose/write/write-hadoop.env
@@ -26,8 +26,8 @@ YARN_CONF_yarn_timeline___service_generic___application___history_enabled=true
 YARN_CONF_yarn_timeline___service_hostname=write-historyserver
 YARN_CONF_mapreduce_map_output_compress=true
 YARN_CONF_mapred_map_output_compress_codec=org.apache.hadoop.io.compress.SnappyCodec
-YARN_CONF_yarn_nodemanager_resource_memory___mb=16384
-YARN_CONF_yarn_nodemanager_resource_cpu___vcores=8
+YARN_CONF_yarn_nodemanager_resource_memory___mb=10240
+YARN_CONF_yarn_nodemanager_resource_cpu___vcores=6
 YARN_CONF_yarn_nodemanager_disk___health___checker_max___disk___utilization___per___disk___percentage=98.5
 YARN_CONF_yarn_nodemanager_remote___app___log___dir=/app-logs
 YARN_CONF_yarn_nodemanager_aux___services=mapreduce_shuffle
diff --git a/docker/dockerfile/cluster/base/Dockerfile b/docker/dockerfile/cluster/base/Dockerfile
index 8cf5ff0..ebfe227 100644
--- a/docker/dockerfile/cluster/base/Dockerfile
+++ b/docker/dockerfile/cluster/base/Dockerfile
@@ -38,17 +38,6 @@ RUN wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2
     && tar -zxvf /opt/jdk-8u141-linux-x64.tar.gz -C /opt/ \
     && rm -f /opt/jdk-8u141-linux-x64.tar.gz
 
-# use buildkit
-#IF $INSTALL_FROM=="net"
-#RUN set -x \
-#    && echo "Fetch URL2 is : ${HADOOP_URL}" \
-#    && curl -fSL "${HADOOP_URL}" -o /tmp/hadoop.tar.gz \
-#    && curl -fSL "${HADOOP_URL}.asc" -o /tmp/hadoop.tar.gz.asc \
-#ELSE IF $INSTALL_FROM=="local"
-#COPY ${PACKAGE_PATH}hadoop-${HADOOP_VERSION}.tar.gz /tmp/hadoop.tar.gz
-#COPY ${PACKAGE_PATH}hadoop-${HADOOP_VERSION}.tar.gz.asc /tmp/hadoop.tar.gz.asc
-#DONE
-
 RUN set -x \
     && echo "Fetch URL2 is : ${HADOOP_URL}" \
     && curl -fSL "${HADOOP_URL}" -o /tmp/hadoop.tar.gz \
@@ -57,13 +46,14 @@ RUN set -x \
 RUN set -x \
     && tar -xvf /tmp/hadoop.tar.gz -C /opt/ \
     && rm /tmp/hadoop.tar.gz* \
-    && ln -s /opt/hadoop-$HADOOP_VERSION/etc/hadoop /etc/hadoop \
-    && if [ -e "/etc/hadoop/mapred-site.xml.template" ]; then cp /etc/hadoop/mapred-site.xml.template /etc/hadoop/mapred-site.xml ;fi \
+    && mkdir -p /etc/hadoop/conf \
+    && cp -r /opt/hadoop-$HADOOP_VERSION/etc/hadoop/* /etc/hadoop/conf \
+    && if [ -e "/etc/hadoop/conf/mapred-site.xml.template" ]; then cp /etc/hadoop/conf/mapred-site.xml.template /etc/hadoop/conf/mapred-site.xml ;fi \
     && mkdir -p /opt/hadoop-$HADOOP_VERSION/logs \
     && mkdir /hadoop-data
 
 ENV HADOOP_PREFIX=/opt/hadoop-$HADOOP_VERSION
-ENV HADOOP_CONF_DIR=/etc/hadoop
+ENV HADOOP_CONF_DIR=/etc/hadoop/conf
 ENV MULTIHOMED_NETWORK=1
 ENV HADOOP_HOME=${HADOOP_PREFIX}
 ENV HADOOP_INSTALL=${HADOOP_HOME}
@@ -74,5 +64,4 @@ ENV PATH $JAVA_HOME/bin:/usr/bin:/bin:$HADOOP_PREFIX/bin/:$PATH
 ADD entrypoint.sh /opt/entrypoint/hadoop/entrypoint.sh
 RUN chmod a+x /opt/entrypoint/hadoop/entrypoint.sh
 
-ENTRYPOINT ["/opt/entrypoint/hadoop/entrypoint.sh"]
-
+ENTRYPOINT ["/opt/entrypoint/hadoop/entrypoint.sh"]
\ No newline at end of file
diff --git a/docker/dockerfile/cluster/base/entrypoint.sh b/docker/dockerfile/cluster/base/entrypoint.sh
index 3479844..2ecb8a2 100644
--- a/docker/dockerfile/cluster/base/entrypoint.sh
+++ b/docker/dockerfile/cluster/base/entrypoint.sh
@@ -51,46 +51,46 @@ function configure() {
         var="${envPrefix}_${c}"
         value=${!var}
         echo " - Setting $name=$value"
-        addProperty /etc/hadoop/$module-site.xml $name "$value"
+        addProperty /etc/hadoop/conf/$module-site.xml $name "$value"
     done
 }
 
-configure /etc/hadoop/core-site.xml core CORE_CONF
-configure /etc/hadoop/hdfs-site.xml hdfs HDFS_CONF
-configure /etc/hadoop/yarn-site.xml yarn YARN_CONF
-configure /etc/hadoop/httpfs-site.xml httpfs HTTPFS_CONF
-configure /etc/hadoop/kms-site.xml kms KMS_CONF
+configure /etc/hadoop/conf/core-site.xml core CORE_CONF
+configure /etc/hadoop/conf/hdfs-site.xml hdfs HDFS_CONF
+configure /etc/hadoop/conf/yarn-site.xml yarn YARN_CONF
+configure /etc/hadoop/conf/httpfs-site.xml httpfs HTTPFS_CONF
+configure /etc/hadoop/conf/kms-site.xml kms KMS_CONF
 
 if [ "$MULTIHOMED_NETWORK" = "1" ]; then
     echo "Configuring for multihomed network"
 
     # HDFS
-    addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.rpc-bind-host 0.0.0.0
-    addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.servicerpc-bind-host 0.0.0.0
-    addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.http-bind-host 0.0.0.0
-    addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.https-bind-host 0.0.0.0
-    addProperty /etc/hadoop/hdfs-site.xml dfs.client.use.datanode.hostname true
-    addProperty /etc/hadoop/hdfs-site.xml dfs.datanode.use.datanode.hostname true
+    addProperty /etc/hadoop/conf/hdfs-site.xml dfs.namenode.rpc-bind-host 0.0.0.0
+    addProperty /etc/hadoop/conf/hdfs-site.xml dfs.namenode.servicerpc-bind-host 0.0.0.0
+    addProperty /etc/hadoop/conf/hdfs-site.xml dfs.namenode.http-bind-host 0.0.0.0
+    addProperty /etc/hadoop/conf/hdfs-site.xml dfs.namenode.https-bind-host 0.0.0.0
+    addProperty /etc/hadoop/conf/hdfs-site.xml dfs.client.use.datanode.hostname true
+    addProperty /etc/hadoop/conf/hdfs-site.xml dfs.datanode.use.datanode.hostname true
 
     # YARN
-    addProperty /etc/hadoop/yarn-site.xml yarn.resourcemanager.bind-host 0.0.0.0
-    addProperty /etc/hadoop/yarn-site.xml yarn.nodemanager.bind-host 0.0.0.0
-    addProperty /etc/hadoop/yarn-site.xml yarn.nodemanager.bind-host 0.0.0.0
-    addProperty /etc/hadoop/yarn-site.xml yarn.timeline-service.bind-host 0.0.0.0
+    addProperty /etc/hadoop/conf/yarn-site.xml yarn.resourcemanager.bind-host 0.0.0.0
+    addProperty /etc/hadoop/conf/yarn-site.xml yarn.nodemanager.bind-host 0.0.0.0
+    addProperty /etc/hadoop/conf/yarn-site.xml yarn.nodemanager.bind-host 0.0.0.0
+    addProperty /etc/hadoop/conf/yarn-site.xml yarn.timeline-service.bind-host 0.0.0.0
 
     # MAPRED
-    addProperty /etc/hadoop/mapred-site.xml yarn.nodemanager.bind-host 0.0.0.0
+    addProperty /etc/hadoop/conf/mapred-site.xml yarn.nodemanager.bind-host 0.0.0.0
 fi
 
 if [ -n "$GANGLIA_HOST" ]; then
-    mv /etc/hadoop/hadoop-metrics.properties /etc/hadoop/hadoop-metrics.properties.orig
-    mv /etc/hadoop/hadoop-metrics2.properties /etc/hadoop/hadoop-metrics2.properties.orig
+    mv /etc/hadoop/conf/hadoop-metrics.properties /etc/hadoop/conf/hadoop-metrics.properties.orig
+    mv /etc/hadoop/conf/hadoop-metrics2.properties /etc/hadoop/conf/hadoop-metrics2.properties.orig
 
     for module in mapred jvm rpc ugi; do
         echo "$module.class=org.apache.hadoop.metrics.ganglia.GangliaContext31"
         echo "$module.period=10"
         echo "$module.servers=$GANGLIA_HOST:8649"
-    done > /etc/hadoop/hadoop-metrics.properties
+    done > /etc/hadoop/conf/hadoop-metrics.properties
 
     for module in namenode datanode resourcemanager nodemanager mrappmaster jobhistoryserver; do
         echo "$module.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31"
@@ -99,7 +99,7 @@ if [ -n "$GANGLIA_HOST" ]; then
         echo "$module.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both"
         echo "$module.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40"
         echo "$module.sink.ganglia.servers=$GANGLIA_HOST:8649"
-    done > /etc/hadoop/hadoop-metrics2.properties
+    done > /etc/hadoop/conf/hadoop-metrics2.properties
 fi
 
 function wait_for_it()
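
Note: a minimal stand-alone sketch of the env-var naming convention consumed by configure() above, assuming the name-translation rule used elsewhere in this entrypoint ("___" becomes "-", "__" becomes "_", "_" becomes "."); it shows why a line such as YARN_CONF_yarn_nodemanager_resource_memory___mb=10240 in write-hadoop.env ends up as yarn.nodemanager.resource.memory-mb in /etc/hadoop/conf/yarn-site.xml.

    # Hypothetical check of the translation only; in the real entrypoint the
    # translated name is passed to addProperty together with the env value.
    key="yarn_nodemanager_resource_memory___mb"
    name=$(echo "$key" | perl -pe 's/___/-/g; s/__/_/g; s/_/./g')
    echo "$name"   # expected: yarn.nodemanager.resource.memory-mb
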
diff --git a/docker/dockerfile/cluster/client/Dockerfile b/docker/dockerfile/cluster/client/Dockerfile
index 48008c1..43c935e 100644
--- a/docker/dockerfile/cluster/client/Dockerfile
+++ b/docker/dockerfile/cluster/client/Dockerfile
@@ -24,13 +24,13 @@ ARG KAFKA_VERSION=2.0.0
 ARG SPARK_VERSION=2.3.1
 ARG SPARK_HADOOP_VERSION=2.6
 
-FROM apachekylin/kylin-hive:hive_${HIVE_VERSION}_hadoop_${HADOOP_VERSION} AS hive
+FROM apachekylin/kylin-ci-hive:hive_${HIVE_VERSION}_hadoop_${HADOOP_VERSION} AS hive
 ENV JAVA_VERSION ${JAVA_VERSION}
 ENV HADOOP_VERSION ${HADOOP_VERSION}
 ENV HIVE_VERSION ${HIVE_VERSION}
 
 ARG HBASE_VERSION=1.1.2
-FROM apachekylin/kylin-hbase-master:hbase_${HBASE_VERSION} AS hbase
+FROM apachekylin/kylin-ci-hbase-master:hbase_${HBASE_VERSION} AS hbase
 ENV HBASE_VERSION ${HBASE_VERSION}
 
 
@@ -52,8 +52,8 @@ ARG HIVE_VERSION=1.2.1
 ARG HBASE_VERSION=1.1.2
 ARG ZOOKEEPER_VERSION=3.4.10
 ARG KAFKA_VERSION=2.0.0
-ARG SPARK_VERSION=2.3.1
-ARG SPARK_HADOOP_VERSION=2.6
+ARG SPARK_VERSION=2.4.6
+ARG SPARK_HADOOP_VERSION=2.7
 
 ENV JAVA_VERSION ${JAVA_VERSION}
 ENV HADOOP_VERSION ${HADOOP_VERSION}
@@ -95,24 +95,24 @@ RUN chmod a+x /opt/entrypoint/kafka/entrypoint.sh
 
 
 RUN set -x \
-    && ln -s /opt/hadoop-$HADOOP_VERSION/etc/hadoop /etc/hadoop \
-    && if [ -e "/etc/hadoop/mapred-site.xml.template" ]; then cp /etc/hadoop/mapred-site.xml.template /etc/hadoop/mapred-site.xml ;fi \
+    && mkdir -p /etc/hadoop/conf \
+    && mkdir -p /etc/hbase/conf \
+    && cp -r /opt/hadoop-$HADOOP_VERSION/etc/hadoop/* /etc/hadoop/conf \
+    && cp -r /opt/hbase-$HBASE_VERSION/conf/* /etc/hbase/conf \
+    && if [ -e "/etc/hadoop/conf/mapred-site.xml.template" ]; then cp /etc/hadoop/conf/mapred-site.xml.template /etc/hadoop/conf/mapred-site.xml ;fi \
     && mkdir -p /opt/hadoop-$HADOOP_VERSION/logs
 
-RUN ln -s /opt/hbase-$HBASE_VERSION/conf /etc/hbase
-
-
 ENV JAVA_HOME=/opt/${JAVA_VERSION}
 
 ENV HADOOP_PREFIX=/opt/hadoop-$HADOOP_VERSION
-ENV HADOOP_CONF_DIR=/etc/hadoop
+ENV HADOOP_CONF_DIR=/etc/hadoop/conf
 ENV HADOOP_HOME=${HADOOP_PREFIX}
 ENV HADOOP_INSTALL=${HADOOP_HOME}
 
 ENV HIVE_HOME=/opt/hive
 
 ENV HBASE_PREFIX=/opt/hbase-$HBASE_VERSION
-ENV HBASE_CONF_DIR=/etc/hbase
+ENV HBASE_CONF_DIR=/etc/hbase/conf
 ENV HBASE_HOME=${HBASE_PREFIX}
 
 
diff --git a/docker/dockerfile/cluster/client/entrypoint.sh b/docker/dockerfile/cluster/client/entrypoint.sh
index dddc072..7a693aa 100644
--- a/docker/dockerfile/cluster/client/entrypoint.sh
+++ b/docker/dockerfile/cluster/client/entrypoint.sh
@@ -1,7 +1,3 @@
 #!/bin/bash
 
-/opt/entrypoint/hadoop/entrypoint.sh
-/opt/entrypoint/hive/entrypoint.sh
-/opt/entrypoint/hbase/entrypoint.sh
-#/opt/entrypoint/zookeeper/entrypoint.sh
-#/opt/entrypoint/kafka/entrypoint.sh
+
diff --git a/docker/dockerfile/cluster/client/run_cli.sh b/docker/dockerfile/cluster/client/run_cli.sh
index 371c3e1..fcdd71c 100644
--- a/docker/dockerfile/cluster/client/run_cli.sh
+++ b/docker/dockerfile/cluster/client/run_cli.sh
@@ -4,7 +4,13 @@
 /opt/entrypoint/hive/entrypoint.sh
 /opt/entrypoint/hbase/entrypoint.sh
 
+sleep 180
+
+cd $KYLIN_HOME
+sh bin/sample.sh
+sh bin/kylin.sh start
+
 while :
 do
-    sleep 1000
+    sleep 100
 done
\ No newline at end of file
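
The fixed "sleep 180" above gives the Hadoop, Hive and HBase entrypoints time to come up before the sample cube is loaded. A hedged alternative sketch, polling the namenode web UI instead of sleeping a fixed time; the write-namenode host name and port 50070 are assumptions based on the write-cluster defaults for Hadoop 2.x.

    # Hypothetical readiness check instead of a fixed sleep; adjust host/port
    # if the cluster layout differs.
    until curl -sf "http://write-namenode:50070" > /dev/null; do
        echo "Waiting for HDFS namenode ..."
        sleep 10
    done
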
diff --git a/docker/dockerfile/cluster/datanode/Dockerfile b/docker/dockerfile/cluster/datanode/Dockerfile
index 54bbc10..6dcb771 100644
--- a/docker/dockerfile/cluster/datanode/Dockerfile
+++ b/docker/dockerfile/cluster/datanode/Dockerfile
@@ -17,7 +17,7 @@
 
 ARG HADOOP_VERSION=2.8.5
 ARG HADOOP_DN_PORT=50075
-FROM apachekylin/kylin-hadoop-base:hadoop_${HADOOP_VERSION}
+FROM apachekylin/kylin-ci-hadoop-base:hadoop_${HADOOP_VERSION}
 
 ENV HADOOP_DN_PORT ${HADOOP_DN_PORT}
 
diff --git a/docker/dockerfile/cluster/hbase/Dockerfile b/docker/dockerfile/cluster/hbase/Dockerfile
index 9b92d56..22daf45 100644
--- a/docker/dockerfile/cluster/hbase/Dockerfile
+++ b/docker/dockerfile/cluster/hbase/Dockerfile
@@ -41,14 +41,15 @@ RUN set -x \
     && tar -xvf /tmp/hbase.tar.gz -C /opt/ \
     && rm /tmp/hbase.tar.gz*
 
-RUN ln -s /opt/hbase-$HBASE_VERSION/conf /etc/hbase
-RUN mkdir /opt/hbase-$HBASE_VERSION/logs
+RUN mkdir -p /etc/hbase/conf \
+    && cp -r /opt/hbase-$HBASE_VERSION/conf/* /etc/hbase/conf \
+    && mkdir /opt/hbase-$HBASE_VERSION/logs
 
 RUN mkdir /hadoop-data
 
 ENV HBASE_PREFIX=/opt/hbase-$HBASE_VERSION
 ENV HBASE_HOME=${HBASE_PREFIX}
-ENV HBASE_CONF_DIR=/etc/hbase
+ENV HBASE_CONF_DIR=/etc/hbase/conf
 
 ENV USER=root
 ENV PATH $JAVA_HOME/bin:$HBASE_PREFIX/bin/:$PATH
diff --git a/docker/dockerfile/cluster/hbase/entrypoint.sh b/docker/dockerfile/cluster/hbase/entrypoint.sh
index 5aea8d9..661bd61 100644
--- a/docker/dockerfile/cluster/hbase/entrypoint.sh
+++ b/docker/dockerfile/cluster/hbase/entrypoint.sh
@@ -39,7 +39,7 @@ function configure() {
         var="${envPrefix}_${c}"
         value=${!var}
         echo " - Setting $name=$value"
-        addProperty /etc/hbase/$module-site.xml $name "$value"
+        addProperty /etc/hbase/conf/$module-site.xml $name "$value"
     done
 }
 
diff --git a/docker/dockerfile/cluster/historyserver/Dockerfile b/docker/dockerfile/cluster/historyserver/Dockerfile
index 2adda43..7c89d00 100644
--- a/docker/dockerfile/cluster/historyserver/Dockerfile
+++ b/docker/dockerfile/cluster/historyserver/Dockerfile
@@ -16,7 +16,7 @@
 #
 
 ARG HADOOP_VERSION=2.8.5
-FROM apachekylin/kylin-hadoop-base:hadoop_${HADOOP_VERSION}
+FROM apachekylin/kylin-ci-hadoop-base:hadoop_${HADOOP_VERSION}
 
 ARG HADOOP_HISTORY_PORT=8188
 ENV HADOOP_HISTORY_PORT ${HADOOP_HISTORY_PORT}
diff --git a/docker/dockerfile/cluster/hive/Dockerfile b/docker/dockerfile/cluster/hive/Dockerfile
index c3f11e5..de544d8 100644
--- a/docker/dockerfile/cluster/hive/Dockerfile
+++ b/docker/dockerfile/cluster/hive/Dockerfile
@@ -16,7 +16,7 @@
 #
 
 ARG HADOOP_VERSION=2.8.5
-FROM apachekylin/kylin-hadoop-base:hadoop_${HADOOP_VERSION}
+FROM apachekylin/kylin-ci-hadoop-base:hadoop_${HADOOP_VERSION}
 
 ENV HIVE_HOME /opt/hive
 ENV HADOOP_HOME /opt/hadoop-$HADOOP_VERSION
diff --git a/docker/dockerfile/cluster/hive/conf/hive-site.xml b/docker/dockerfile/cluster/hive/conf/hive-site.xml
index 60f3935..c6e1d92 100644
--- a/docker/dockerfile/cluster/hive/conf/hive-site.xml
+++ b/docker/dockerfile/cluster/hive/conf/hive-site.xml
@@ -14,5 +14,6 @@
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
---><configuration>
+-->
+<configuration>
 </configuration>
diff --git a/docker/dockerfile/cluster/hive/entrypoint.sh b/docker/dockerfile/cluster/hive/entrypoint.sh
index d6a888c..7a129cd 100644
--- a/docker/dockerfile/cluster/hive/entrypoint.sh
+++ b/docker/dockerfile/cluster/hive/entrypoint.sh
@@ -48,39 +48,39 @@ function configure() {
     done
 }
 
-configure /etc/hadoop/core-site.xml core CORE_CONF
-configure /etc/hadoop/hdfs-site.xml hdfs HDFS_CONF
-configure /etc/hadoop/yarn-site.xml yarn YARN_CONF
-configure /etc/hadoop/httpfs-site.xml httpfs HTTPFS_CONF
-configure /etc/hadoop/kms-site.xml kms KMS_CONF
-configure /etc/hadoop/mapred-site.xml mapred MAPRED_CONF
-configure /etc/hadoop/hive-site.xml hive HIVE_SITE_CONF
+configure /etc/hadoop/conf/core-site.xml core CORE_CONF
+configure /etc/hadoop/conf/hdfs-site.xml hdfs HDFS_CONF
+configure /etc/hadoop/conf/yarn-site.xml yarn YARN_CONF
+configure /etc/hadoop/conf/httpfs-site.xml httpfs HTTPFS_CONF
+configure /etc/hadoop/conf/kms-site.xml kms KMS_CONF
+configure /etc/hadoop/conf/mapred-site.xml mapred MAPRED_CONF
+configure /etc/hadoop/conf/hive-site.xml hive HIVE_SITE_CONF
 configure /opt/hive/conf/hive-site.xml hive HIVE_SITE_CONF
 
 if [ "$MULTIHOMED_NETWORK" = "1" ]; then
     echo "Configuring for multihomed network"
 
     # HDFS
-    addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.rpc-bind-host 0.0.0.0
-    addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.servicerpc-bind-host 0.0.0.0
-    addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.http-bind-host 0.0.0.0
-    addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.https-bind-host 0.0.0.0
-    addProperty /etc/hadoop/hdfs-site.xml dfs.client.use.datanode.hostname true
-    addProperty /etc/hadoop/hdfs-site.xml dfs.datanode.use.datanode.hostname true
+    addProperty /etc/hadoop/conf/hdfs-site.xml dfs.namenode.rpc-bind-host 0.0.0.0
+    addProperty /etc/hadoop/conf/hdfs-site.xml dfs.namenode.servicerpc-bind-host 0.0.0.0
+    addProperty /etc/hadoop/conf/hdfs-site.xml dfs.namenode.http-bind-host 0.0.0.0
+    addProperty /etc/hadoop/conf/hdfs-site.xml dfs.namenode.https-bind-host 0.0.0.0
+    addProperty /etc/hadoop/conf/hdfs-site.xml dfs.client.use.datanode.hostname true
+    addProperty /etc/hadoop/conf/hdfs-site.xml dfs.datanode.use.datanode.hostname true
 
     # YARN
-    addProperty /etc/hadoop/yarn-site.xml yarn.resourcemanager.bind-host 0.0.0.0
-    addProperty /etc/hadoop/yarn-site.xml yarn.nodemanager.bind-host 0.0.0.0
-    addProperty /etc/hadoop/yarn-site.xml yarn.nodemanager.bind-host 0.0.0.0
-    addProperty /etc/hadoop/yarn-site.xml yarn.timeline-service.bind-host 0.0.0.0
+    addProperty /etc/hadoop/conf/yarn-site.xml yarn.resourcemanager.bind-host 0.0.0.0
+    addProperty /etc/hadoop/conf/yarn-site.xml yarn.nodemanager.bind-host 0.0.0.0
+    addProperty /etc/hadoop/conf/yarn-site.xml yarn.nodemanager.bind-host 0.0.0.0
+    addProperty /etc/hadoop/conf/yarn-site.xml yarn.timeline-service.bind-host 0.0.0.0
 
     # MAPRED
-    addProperty /etc/hadoop/mapred-site.xml yarn.nodemanager.bind-host 0.0.0.0
+    addProperty /etc/hadoop/conf/mapred-site.xml yarn.nodemanager.bind-host 0.0.0.0
 fi
 
 if [ -n "$GANGLIA_HOST" ]; then
-    mv /etc/hadoop/hadoop-metrics.properties /etc/hadoop/hadoop-metrics.properties.orig
-    mv /etc/hadoop/hadoop-metrics2.properties /etc/hadoop/hadoop-metrics2.properties.orig
+    mv /etc/hadoop/conf/hadoop-metrics.properties /etc/hadoop/conf/hadoop-metrics.properties.orig
+    mv /etc/hadoop/conf/hadoop-metrics2.properties /etc/hadoop/conf/hadoop-metrics2.properties.orig
 
     for module in mapred jvm rpc ugi; do
         echo "$module.class=org.apache.hadoop.metrics.ganglia.GangliaContext31"
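
Usage sketch (the value is illustrative, not part of this commit): because hive-site.xml is configured twice above, a single HIVE_SITE_CONF_* variable from the compose env file is written to both the Hadoop conf dir and the Hive conf dir.

    # Hypothetical example: hive.metastore.uris would be added to both copies
    # by the two "configure ... hive HIVE_SITE_CONF" calls above.
    export HIVE_SITE_CONF_hive_metastore_uris="thrift://write-hive-metastore:9083"
    # After the entrypoint runs, the property should appear in both files:
    grep -A1 "hive.metastore.uris" /etc/hadoop/conf/hive-site.xml /opt/hive/conf/hive-site.xml
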
diff --git a/docker/dockerfile/cluster/hmaster/Dockerfile b/docker/dockerfile/cluster/hmaster/Dockerfile
index 09aa0e3..bcdc1de 100644
--- a/docker/dockerfile/cluster/hmaster/Dockerfile
+++ b/docker/dockerfile/cluster/hmaster/Dockerfile
@@ -2,7 +2,7 @@
 
 ARG HBASE_VERSION=1.1.2
 
-FROM apachekylin/kylin-hbase-base:hbase_${HBASE_VERSION}
+FROM apachekylin/kylin-ci-hbase-base:hbase_${HBASE_VERSION}
 
 ENV HBASE_VERSION ${HBASE_VERSION}
 COPY run_hm.sh /run_hm.sh
diff --git a/docker/dockerfile/cluster/hregionserver/Dockerfile b/docker/dockerfile/cluster/hregionserver/Dockerfile
index aaced16..f4e63e9 100644
--- a/docker/dockerfile/cluster/hregionserver/Dockerfile
+++ b/docker/dockerfile/cluster/hregionserver/Dockerfile
@@ -1,6 +1,6 @@
 ARG HBASE_VERSION=1.1.2
 
-FROM apachekylin/kylin-hbase-base:hbase_${HBASE_VERSION}
+FROM apachekylin/kylin-ci-hbase-base:hbase_${HBASE_VERSION}
 
 ENV HBASE_VERSION ${HBASE_VERSION}
 
diff --git a/docker/dockerfile/cluster/kylin/Dockerfile b/docker/dockerfile/cluster/kylin/Dockerfile
index 2bd4a1b..9c2a4cf 100644
--- a/docker/dockerfile/cluster/kylin/Dockerfile
+++ b/docker/dockerfile/cluster/kylin/Dockerfile
@@ -20,6 +20,6 @@ ARG HIVE_VERSION=1.2.1
 ARG HBASE_VERSION=1.1.2
 ARG SPARK_VERSION=2.3.3
 
-FROM apachekylin/kylin-client:hadoop_${HADOOP_VERSION}_hive_${HIVE_VERSION}_spark_${HBASE_VERSION}_spark_${SPARK_VERSION} AS client
+FROM apachekylin/kylin-ci-client:hadoop_${HADOOP_VERSION}_hive_${HIVE_VERSION}_spark_${HBASE_VERSION}_spark_${SPARK_VERSION} AS client
 
 #CMD ["/bin/bash"]
\ No newline at end of file
diff --git a/docker/dockerfile/cluster/namenode/Dockerfile b/docker/dockerfile/cluster/namenode/Dockerfile
index 3418680..0a44841 100644
--- a/docker/dockerfile/cluster/namenode/Dockerfile
+++ b/docker/dockerfile/cluster/namenode/Dockerfile
@@ -16,7 +16,7 @@
 #
 
 ARG HADOOP_VERSION=2.8.5
-FROM apachekylin/kylin-hadoop-base:hadoop_${HADOOP_VERSION}
+FROM apachekylin/kylin-ci-hadoop-base:hadoop_${HADOOP_VERSION}
 
 ENV HADOOP_VERSION ${HADOOP_VERSION}
 
diff --git a/docker/dockerfile/cluster/nodemanager/Dockerfile b/docker/dockerfile/cluster/nodemanager/Dockerfile
index 8ec68df..631dcae 100644
--- a/docker/dockerfile/cluster/nodemanager/Dockerfile
+++ b/docker/dockerfile/cluster/nodemanager/Dockerfile
@@ -16,7 +16,7 @@
 #
 
 ARG HADOOP_VERSION=2.8.5
-FROM apachekylin/kylin-hadoop-base:hadoop_${HADOOP_VERSION}
+FROM apachekylin/kylin-ci-hadoop-base:hadoop_${HADOOP_VERSION}
 
 MAINTAINER kylin
 
diff --git a/docker/dockerfile/cluster/resourcemanager/Dockerfile b/docker/dockerfile/cluster/resourcemanager/Dockerfile
index b99027f..5fee110 100644
--- a/docker/dockerfile/cluster/resourcemanager/Dockerfile
+++ b/docker/dockerfile/cluster/resourcemanager/Dockerfile
@@ -16,7 +16,7 @@
 #
 
 ARG HADOOP_VERSION=2.8.5
-FROM apachekylin/kylin-hadoop-base:hadoop_${HADOOP_VERSION}
+FROM apachekylin/kylin-ci-hadoop-base:hadoop_${HADOOP_VERSION}
 
 MAINTAINER kylin
 
diff --git a/docker/header.sh b/docker/header.sh
index a990d90..a5a6cf7 100644
--- a/docker/header.sh
+++ b/docker/header.sh
@@ -28,8 +28,7 @@ eval set -- "${ARGS}"
 HADOOP_VERSION="2.8.5"
 HIVE_VERSION="1.2.2"
 HBASE_VERSION="1.1.2"
-
-# write write-read
+# write,write-read
 CLUSTER_MODE="write"
 # yes,no
 ENABLE_HBASE="yes"
@@ -37,7 +36,7 @@ ENABLE_HBASE="yes"
 ENABLE_LDAP="no"
 # yes,no
 ENABLE_KERBEROS="no"
-#
+# yes,no
 ENABLE_KAFKA="no"
 
 while true;
@@ -116,21 +115,21 @@ export HBASE_VERSION=$HBASE_VERSION
 export HADOOP_VERSION=$HADOOP_VERSION
 export HIVE_VERSION=$HIVE_VERSION
 
-export HADOOP_NAMENODE_IMAGETAG=apachekylin/kylin-hadoop-base:hadoop_${HADOOP_VERSION}
-export HADOOP_DATANODE_IMAGETAG=apachekylin/kylin-hadoop-datanode:hadoop_${HADOOP_VERSION}
-export HADOOP_NAMENODE_IMAGETAG=apachekylin/kylin-hadoop-namenode:hadoop_${HADOOP_VERSION}
-export HADOOP_RESOURCEMANAGER_IMAGETAG=apachekylin/kylin-hadoop-resourcemanager:hadoop_${HADOOP_VERSION}
-export HADOOP_NODEMANAGER_IMAGETAG=apachekylin/kylin-hadoop-nodemanager:hadoop_${HADOOP_VERSION}
-export HADOOP_HISTORYSERVER_IMAGETAG=apachekylin/kylin-hadoop-historyserver:hadoop_${HADOOP_VERSION}
-export HIVE_IMAGETAG=apachekylin/kylin-hive:hive_${HIVE_VERSION}_hadoop_${HADOOP_VERSION}
+export HADOOP_NAMENODE_IMAGETAG=apachekylin/kylin-ci-hadoop-base:hadoop_${HADOOP_VERSION}
+export HADOOP_DATANODE_IMAGETAG=apachekylin/kylin-ci-hadoop-datanode:hadoop_${HADOOP_VERSION}
+export HADOOP_NAMENODE_IMAGETAG=apachekylin/kylin-ci-hadoop-namenode:hadoop_${HADOOP_VERSION}
+export HADOOP_RESOURCEMANAGER_IMAGETAG=apachekylin/kylin-ci-hadoop-resourcemanager:hadoop_${HADOOP_VERSION}
+export HADOOP_NODEMANAGER_IMAGETAG=apachekylin/kylin-ci-hadoop-nodemanager:hadoop_${HADOOP_VERSION}
+export HADOOP_HISTORYSERVER_IMAGETAG=apachekylin/kylin-ci-hadoop-historyserver:hadoop_${HADOOP_VERSION}
+export HIVE_IMAGETAG=apachekylin/kylin-ci-hive:hive_${HIVE_VERSION}_hadoop_${HADOOP_VERSION}
 
-export HBASE_MASTER_IMAGETAG=apachekylin/kylin-hbase-base:hbase_${HBASE_VERSION}
-export HBASE_MASTER_IMAGETAG=apachekylin/kylin-hbase-master:hbase_${HBASE_VERSION}
-export HBASE_REGIONSERVER_IMAGETAG=apachekylin/kylin-hbase-regionserver:hbase_${HBASE_VERSION}
+export HBASE_MASTER_IMAGETAG=apachekylin/kylin-ci-hbase-base:hbase_${HBASE_VERSION}
+export HBASE_MASTER_IMAGETAG=apachekylin/kylin-ci-hbase-master:hbase_${HBASE_VERSION}
+export HBASE_REGIONSERVER_IMAGETAG=apachekylin/kylin-ci-hbase-regionserver:hbase_${HBASE_VERSION}
 
 export KAFKA_IMAGE=bitnami/kafka:2.0.0
 export LDAP_IMAGE=osixia/openldap:1.3.0
-export CLIENT_IMAGETAG=apachekylin/kylin-client:hadoop_${HADOOP_VERSION}_hive_${HIVE_VERSION}_hbase_${HBASE_VERSION}
+export CLIENT_IMAGETAG=apachekylin/kylin-ci-client:hadoop_${HADOOP_VERSION}_hive_${HIVE_VERSION}_hbase_${HBASE_VERSION}
 
 if [[ $HADOOP_VERSION < "3" ]]; then
   export HADOOP_WEBHDFS_PORT=50070
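
A short usage sketch of how these exports interact with the compose files (paths assume the repository root as the working directory; KYLIN_WS is passed the same way setup_cluster.sh does): docker-compose only falls back to the ${VAR:-default} image tags when a variable is unset, so sourcing header.sh is what pins every service to the kylin-ci-* images for the selected versions.

    # Hypothetical check that the exported tags are picked up by compose:
    source docker/header.sh
    KYLIN_WS=$(pwd) docker-compose \
        -f docker/docker-compose/write/docker-compose-hive.yml config | grep "image:"
    # should list the apachekylin/kylin-ci-hive tag for HIVE_VERSION/HADOOP_VERSION
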
diff --git a/docker/setup_cluster.sh b/docker/setup_cluster.sh
index 34cc01e..b323cd7 100644
--- a/docker/setup_cluster.sh
+++ b/docker/setup_cluster.sh
@@ -19,34 +19,37 @@
 SCRIPT_PATH=$(cd `dirname $0`; pwd)
 WS_ROOT=`dirname $SCRIPT_PATH`
 
-source ${SCRIPT_PATH}/build_cluster_images.sh
-
-# restart cluster
+#source ${SCRIPT_PATH}/build_cluster_images.sh
+source ${SCRIPT_PATH}/header.sh
 
+echo "Restart main Hadoop cluster ......"
 KYLIN_WS=${WS_ROOT} docker-compose -f ${SCRIPT_PATH}/docker-compose/write/docker-compose-hadoop.yml down
 KYLIN_WS=${WS_ROOT} docker-compose -f ${SCRIPT_PATH}/docker-compose/write/docker-compose-zookeeper.yml down
 KYLIN_WS=${WS_ROOT} docker-compose -f ${SCRIPT_PATH}/docker-compose/others/docker-compose-metastore.yml down
 KYLIN_WS=${WS_ROOT} docker-compose -f ${SCRIPT_PATH}/docker-compose/write/docker-compose-hive.yml down
+
 sleep 5
-# hadoop
+
 KYLIN_WS=${WS_ROOT} docker-compose -f ${SCRIPT_PATH}/docker-compose/write/docker-compose-hadoop.yml up -d
 KYLIN_WS=${WS_ROOT} docker-compose -f ${SCRIPT_PATH}/docker-compose/write/docker-compose-zookeeper.yml up -d
 KYLIN_WS=${WS_ROOT} docker-compose -f ${SCRIPT_PATH}/docker-compose/others/docker-compose-metastore.yml up -d
 KYLIN_WS=${WS_ROOT} docker-compose -f ${SCRIPT_PATH}/docker-compose/write/docker-compose-hive.yml up -d
 
-
+echo "Restart Kerberos service ......"
 if [ $ENABLE_KERBEROS == "yes" ]; then
   KYLIN_WS=${WS_ROOT} docker-compose -f ${SCRIPT_PATH}/docker-compose/others/docker-compose-kerberos.yml down
   sleep 2
   KYLIN_WS=${WS_ROOT} docker-compose -f ${SCRIPT_PATH}/docker-compose/others/docker-compose-kerberos.yml up -d
 fi
 
+echo "Restart LADP service ......"
 if [ $ENABLE_LDAP == "yes" ]; then
   KYLIN_WS=${WS_ROOT} docker-compose -f ${SCRIPT_PATH}/docker-compose/others/docker-compose-ldap.yml down
   sleep 2
   KYLIN_WS=${WS_ROOT} docker-compose -f ${SCRIPT_PATH}/docker-compose/others/docker-compose-ldap.yml up -d
 fi
 
+echo "Restart Kafka service ......"
 if [ $ENABLE_KAFKA == "yes" ]; then
   KYLIN_WS=${WS_ROOT} docker-compose -f ${SCRIPT_PATH}/docker-compose/write/docker-compose-kafka.yml down
   sleep 2
@@ -55,6 +58,7 @@ fi
 
 
 if [ $CLUSTER_MODE == "write" ]; then
+  echo "Restart Kylin cluster & HBase cluster ......"
   KYLIN_WS=${WS_ROOT} docker-compose -f ${SCRIPT_PATH}/docker-compose/others/docker-compose-kylin-write.yml down
   if [ $ENABLE_HBASE == "yes" ]; then
     KYLIN_WS=${WS_ROOT} docker-compose -f ${SCRIPT_PATH}/docker-compose/write/docker-compose-hbase.yml down
@@ -64,8 +68,8 @@ if [ $CLUSTER_MODE == "write" ]; then
   KYLIN_WS=${WS_ROOT} docker-compose -f ${SCRIPT_PATH}/docker-compose/others/docker-compose-kylin-write.yml up -d
 fi
 
-# restart cluster
 if [ $CLUSTER_MODE == "write-read" ]; then
+  echo "Restart Kylin cluster[write-read mode] & Read HBase cluster ......"
   KYLIN_WS=${WS_ROOT} docker-compose -f ${SCRIPT_PATH}/docker-compose/read/docker-compose-zookeeper.yml down
   KYLIN_WS=${WS_ROOT} docker-compose -f ${SCRIPT_PATH}/docker-compose/read/docker-compose-hadoop.yml down
   KYLIN_WS=${WS_ROOT} docker-compose -f ${SCRIPT_PATH}/docker-compose/others/docker-compose-kylin-write-read.yml down