Posted to commits@kylin.apache.org by xx...@apache.org on 2020/11/08 08:13:01 UTC

[kylin] 01/13: KYLIN-4775 Use docker-compose to deploy Hadoop and Kylin

This is an automated email from the ASF dual-hosted git repository.

xxyu pushed a commit to branch kylin-on-parquet-v2
in repository https://gitbox.apache.org/repos/asf/kylin.git

commit 69ac9cea322d5430ac0756caab730ceb19449e7e
Author: yongheng.liu <li...@gmail.com>
AuthorDate: Fri Sep 25 15:24:51 2020 +0800

    KYLIN-4775 Use docker-compose to deploy Hadoop and Kylin
---
 docker/Dockerfile_hadoop                           |  96 ---------
 docker/build_cluster_images.sh                     |  89 +++++++++
 .../{build_image.sh => build_standalone_image.sh}  |   0
 .../others/docker-compose-kerberos.yml             |  13 ++
 .../read/conf/hadoop-read}/core-site.xml           |  14 +-
 .../read/conf/hadoop-read/hdfs-site.xml            |  31 +++
 .../read/conf/hadoop-read/mapred-site.xml}         |  13 +-
 .../read/conf/hadoop-read/yarn-site.xml            |  46 +++++
 .../read}/conf/hadoop/core-site.xml                |  15 +-
 .../docker-compose/read/conf/hadoop/hdfs-site.xml  |  31 +++
 .../read/conf/hadoop/mapred-site.xml}              |  13 +-
 .../docker-compose/read/conf/hadoop/yarn-site.xml  |  46 +++++
 .../docker-compose/read/conf/hbase/hbase-site.xml  |  34 ++++
 docker/docker-compose/read/conf/hive/hive-site.xml |  25 +++
 .../read/docker-compose-zookeeper.yml              |  18 ++
 docker/docker-compose/read/read-hadoop.env         |  40 ++++
 .../read/read-hbase-distributed-local.env          |  12 ++
 docker/docker-compose/write-read/client.env        |  61 ++++++
 .../write-read/test-docker-compose-mysql.yml       |  16 ++
 docker/docker-compose/write/client.env             |  61 ++++++
 .../write/conf/hadoop-read}/core-site.xml          |  14 +-
 .../write/conf/hadoop-read/hdfs-site.xml           |  31 +++
 .../write/conf/hadoop-read/mapred-site.xml}        |  13 +-
 .../write/conf/hadoop-read/yarn-site.xml           |  46 +++++
 .../write/conf/hadoop-write}/core-site.xml         |  14 +-
 .../write/conf/hadoop-write/hdfs-site.xml          |  31 +++
 .../write/conf/hadoop-write/mapred-site.xml}       |  13 +-
 .../write/conf/hadoop-write/yarn-site.xml          |  46 +++++
 .../write}/conf/hadoop/core-site.xml               |  15 +-
 .../docker-compose/write/conf/hadoop/hdfs-site.xml |  31 +++
 .../write/conf/hadoop/mapred-site.xml}             |  13 +-
 .../docker-compose/write/conf/hadoop/yarn-site.xml |  46 +++++
 .../docker-compose/write/conf/hbase/hbase-site.xml |  34 ++++
 .../docker-compose/write/conf/hive/hive-site.xml   |  25 +++
 .../docker-compose/write/docker-compose-kafka.yml  |  18 ++
 .../docker-compose/write/docker-compose-write.yml  | 215 +++++++++++++++++++++
 .../write/docker-compose-zookeeper.yml             |  18 ++
 docker/docker-compose/write/write-hadoop.env       |  47 +++++
 .../write/write-hbase-distributed-local.env        |  12 ++
 docker/dockerfile/cluster/base/Dockerfile          |  78 ++++++++
 docker/dockerfile/cluster/base/entrypoint.sh       | 140 ++++++++++++++
 docker/dockerfile/cluster/client/Dockerfile        | 157 +++++++++++++++
 .../cluster/client/conf/hadoop-read}/core-site.xml |  14 +-
 .../cluster/client/conf/hadoop-read/hdfs-site.xml  |  31 +++
 .../client/conf/hadoop-read/mapred-site.xml}       |  13 +-
 .../cluster/client/conf/hadoop-read/yarn-site.xml  |  46 +++++
 .../client/conf/hadoop-write}/core-site.xml        |  14 +-
 .../cluster/client/conf/hadoop-write/hdfs-site.xml |  31 +++
 .../client/conf/hadoop-write/mapred-site.xml}      |  13 +-
 .../cluster/client/conf/hadoop-write/yarn-site.xml |  46 +++++
 .../cluster/client/conf/hbase/hbase-site.xml       |  34 ++++
 .../cluster/client/conf/hive/hive-site.xml         |  25 +++
 docker/dockerfile/cluster/client/entrypoint.sh     |   7 +
 docker/dockerfile/cluster/client/run_cli.sh        |  10 +
 .../cluster/datanode/Dockerfile}                   |  20 +-
 .../cluster/datanode/run_dn.sh}                    |  18 +-
 docker/dockerfile/cluster/hbase/Dockerfile         |  59 ++++++
 docker/dockerfile/cluster/hbase/entrypoint.sh      |  83 ++++++++
 .../cluster/historyserver/Dockerfile}              |  23 ++-
 .../cluster/historyserver/run_history.sh}          |  12 +-
 docker/dockerfile/cluster/hive/Dockerfile          |  73 +++++++
 .../cluster/hive/conf/beeline-log4j2.properties    |  46 +++++
 docker/dockerfile/cluster/hive/conf/hive-env.sh    |  55 ++++++
 .../cluster/hive/conf/hive-exec-log4j2.properties  |  67 +++++++
 .../cluster/hive/conf/hive-log4j2.properties       |  74 +++++++
 docker/dockerfile/cluster/hive/conf/hive-site.xml  |  18 ++
 .../dockerfile/cluster/hive/conf/ivysettings.xml   |  44 +++++
 .../hive/conf/llap-daemon-log4j2.properties        |  94 +++++++++
 docker/dockerfile/cluster/hive/entrypoint.sh       | 136 +++++++++++++
 .../cluster/hive/run_hv.sh}                        |  18 +-
 docker/dockerfile/cluster/hmaster/Dockerfile       |  13 ++
 .../cluster/hmaster/run_hm.sh}                     |  12 +-
 docker/dockerfile/cluster/hregionserver/Dockerfile |  12 ++
 .../cluster/hregionserver/run_hr.sh}               |  12 +-
 .../cluster/kerberos/Dockerfile}                   |  24 ++-
 docker/dockerfile/cluster/kerberos/conf/kadm5.acl  |   1 +
 .../cluster/kerberos/conf/kdc.conf}                |  20 +-
 .../cluster/kerberos/conf/krb5.conf}               |  32 ++-
 .../cluster/kerberos/run_krb.sh}                   |  18 +-
 .../cluster/kylin/Dockerfile}                      |  17 +-
 docker/dockerfile/cluster/kylin/entrypoint.sh      |   3 +
 docker/dockerfile/cluster/metastore-db/Dockerfile  |  12 ++
 docker/dockerfile/cluster/metastore-db/run_db.sh   |  15 ++
 .../cluster/namenode/Dockerfile}                   |  25 ++-
 .../cluster/namenode/run_nn.sh}                    |  23 ++-
 .../cluster/nodemanager/Dockerfile}                |  21 +-
 .../cluster/nodemanager/run_nm.sh}                 |  12 +-
 docker/dockerfile/cluster/pom.xml                  |  81 ++++++++
 .../cluster/resourcemanager/Dockerfile}            |  21 +-
 .../cluster/resourcemanager/run_rm.sh}             |  12 +-
 docker/{ => dockerfile/standalone}/Dockerfile      |   0
 .../standalone}/conf/hadoop/core-site.xml          |   0
 .../standalone}/conf/hadoop/hdfs-site.xml          |   0
 .../standalone}/conf/hadoop/mapred-site.xml        |   0
 .../standalone}/conf/hadoop/yarn-site.xml          |   0
 .../standalone}/conf/hive/hive-site.xml            |   0
 .../standalone}/conf/maven/settings.xml            |   0
 docker/{ => dockerfile/standalone}/entrypoint.sh   |   0
 docker/setup_cluster.sh                            |  28 +++
 docker/{run_container.sh => setup_standalone.sh}   |   0
 docker/stop_cluster.sh                             |  23 +++
 101 files changed, 2906 insertions(+), 386 deletions(-)

diff --git a/docker/Dockerfile_hadoop b/docker/Dockerfile_hadoop
deleted file mode 100644
index 8e76855..0000000
--- a/docker/Dockerfile_hadoop
+++ /dev/null
@@ -1,96 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Docker image with Hadoop/Spark/Hive/ZK/Kafka installed
-FROM centos:6.9
-
-ENV HIVE_VERSION 1.2.1
-ENV HADOOP_VERSION 2.7.0
-ENV SPARK_VERSION 2.4.6
-ENV ZK_VERSION 3.4.6
-ENV KAFKA_VERSION 1.1.1
-
-ENV JAVA_HOME /home/admin/jdk1.8.0_141
-ENV MVN_HOME /home/admin/apache-maven-3.6.1
-ENV HADOOP_HOME /home/admin/hadoop-$HADOOP_VERSION
-ENV HIVE_HOME /home/admin/apache-hive-$HIVE_VERSION-bin
-ENV HADOOP_CONF $HADOOP_HOME/etc/hadoop
-ENV HADOOP_CONF_DIR $HADOOP_HOME/etc/hadoop
-ENV SPARK_HOME /home/admin/spark-$SPARK_VERSION-bin-hadoop2.7
-ENV SPARK_CONF_DIR $SPARK_HOME/conf
-ENV ZK_HOME /home/admin/zookeeper-$ZK_VERSION
-ENV KAFKA_HOME /home/admin/kafka_2.11-$KAFKA_VERSION
-ENV PATH $PATH:$JAVA_HOME/bin:$ZK_HOME/bin:$HADOOP_HOME/bin:$HIVE_HOME/bin:$MVN_HOME/bin:$KAFKA_HOME/bin
-
-USER root
-
-WORKDIR /home/admin
-
-# install tools
-RUN yum -y install lsof.x86_64 wget.x86_64 tar.x86_64 git.x86_64 mysql-server.x86_64 mysql.x86_64 unzip.x86_64
-
-# install mvn
-RUN wget https://archive.apache.org/dist/maven/maven-3/3.6.1/binaries/apache-maven-3.6.1-bin.tar.gz \
-    && tar -zxvf apache-maven-3.6.1-bin.tar.gz \
-    && rm -f apache-maven-3.6.1-bin.tar.gz
-COPY conf/maven/settings.xml $MVN_HOME/conf/settings.xml
-
-# install npm
-RUN curl -sL https://rpm.nodesource.com/setup_8.x | bash - \
-    && yum install -y nodejs
-
-# setup jdk
-RUN wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" "http://download.oracle.com/otn-pub/java/jdk/8u141-b15/336fa29ff2bb4ef291e347e091f7f4a7/jdk-8u141-linux-x64.tar.gz" \
-    && tar -zxvf /home/admin/jdk-8u141-linux-x64.tar.gz \
-    && rm -f /home/admin/jdk-8u141-linux-x64.tar.gz
-
-# setup hadoop
-RUN wget https://archive.apache.org/dist/hadoop/core/hadoop-$HADOOP_VERSION/hadoop-$HADOOP_VERSION.tar.gz \
-    && tar -zxvf /home/admin/hadoop-$HADOOP_VERSION.tar.gz \
-    && rm -f /home/admin/hadoop-$HADOOP_VERSION.tar.gz \
-    && mkdir -p /data/hadoop
-COPY conf/hadoop/* $HADOOP_CONF/
-
-# setup hive
-RUN wget https://archive.apache.org/dist/hive/hive-$HIVE_VERSION/apache-hive-$HIVE_VERSION-bin.tar.gz \
-    && tar -zxvf /home/admin/apache-hive-$HIVE_VERSION-bin.tar.gz \
-    && rm -f /home/admin/apache-hive-$HIVE_VERSION-bin.tar.gz \
-    && wget -P $HIVE_HOME/lib https://repo1.maven.org/maven2/mysql/mysql-connector-java/5.1.24/mysql-connector-java-5.1.24.jar
-COPY conf/hive/hive-site.xml $HIVE_HOME/conf
-COPY conf/hive/hive-site.xml $HADOOP_CONF/
-
-# setup spark
-RUN wget https://archive.apache.org/dist/spark/spark-$SPARK_VERSION/spark-$SPARK_VERSION-bin-hadoop2.7.tgz \
-    && tar -zxvf /home/admin/spark-$SPARK_VERSION-bin-hadoop2.7.tgz \
-    && rm -f /home/admin/spark-$SPARK_VERSION-bin-hadoop2.7.tgz \
-    && cp $HIVE_HOME/conf/hive-site.xml $SPARK_HOME/conf \
-    && cp $SPARK_HOME/yarn/*.jar $HADOOP_HOME/share/hadoop/yarn/lib
-RUN cp $HIVE_HOME/lib/mysql-connector-java-5.1.24.jar $SPARK_HOME/jars
-RUN cp $HIVE_HOME/hcatalog/share/hcatalog/hive-hcatalog-core-1.2.1.jar $SPARK_HOME/jars/
-COPY conf/spark/* $SPARK_CONF_DIR/
-
-# setup kafka
-RUN wget https://archive.apache.org/dist/kafka/$KAFKA_VERSION/kafka_2.11-$KAFKA_VERSION.tgz \
-    && tar -zxvf /home/admin/kafka_2.11-$KAFKA_VERSION.tgz \
-    && rm -f /home/admin/kafka_2.11-$KAFKA_VERSION.tgz
-
-# setup zk
-RUN wget https://archive.apache.org/dist/zookeeper/zookeeper-$ZK_VERSION/zookeeper-$ZK_VERSION.tar.gz \
-    && tar -zxvf /home/admin/zookeeper-$ZK_VERSION.tar.gz \
-    && rm -f /home/admin/zookeeper-$ZK_VERSION.tar.gz \
-    && mkdir -p /data/zookeeper
-COPY conf/zk/zoo.cfg $ZK_HOME/conf
diff --git a/docker/build_cluster_images.sh b/docker/build_cluster_images.sh
new file mode 100644
index 0000000..ac60533
--- /dev/null
+++ b/docker/build_cluster_images.sh
@@ -0,0 +1,89 @@
+#!/bin/bash
+
+ARGS=$(getopt -o h:i:b --long hadoop_version:,hive_version:,hbase_version: -n 'parameter.bash' -- "$@")
+
+if [ $? != 0 ]; then
+    echo "Terminating..."
+    exit 1
+fi
+
+eval set -- "${ARGS}"
+
+HADOOP_VERSION="2.8.5"
+HIVE_VERSION="1.2.2"
+HBASE_VERSION="1.1.2"
+
+while true;
+do
+    case "$1" in
+        --hadoop_version)
+            HADOOP_VERSION=$2;
+            shift 2;
+            ;;
+        --hive_version)
+            HIVE_VERSION=$2;
+            shift 2;
+            ;;
+        --hbase_version)
+            HBASE_VERSION=$2;
+            shift 2;
+            ;;
+        --)
+            shift; break
+            ;;
+        *)
+            echo "Internal error!"
+            break
+            ;;
+    esac
+done
+
+for arg in "$@"
+do
+    echo "processing $arg"
+done
+
+echo "........hadoop version: "$HADOOP_VERSION
+echo "........hive version: "$HIVE_VERSION
+echo "........hbase version: "$HBASE_VERSION
+
+#docker build -t apachekylin/kylin-metastore:mysql_5.6.49 ./kylin/metastore-db
+
+docker build -t apachekylin/kylin-hadoop-base:hadoop_${HADOOP_VERSION} --build-arg HADOOP_VERSION=${HADOOP_VERSION} ./dockerfile/cluster/base
+docker build -t apachekylin/kylin-hadoop-namenode:hadoop_${HADOOP_VERSION} --build-arg HADOOP_VERSION=${HADOOP_VERSION} ./dockerfile/cluster/namenode
+docker build -t apachekylin/kylin-hadoop-datanode:hadoop_${HADOOP_VERSION} --build-arg HADOOP_VERSION=${HADOOP_VERSION} ./dockerfile/cluster/datanode
+docker build -t apachekylin/kylin-hadoop-resourcemanager:hadoop_${HADOOP_VERSION} --build-arg HADOOP_VERSION=${HADOOP_VERSION} ./dockerfile/cluster/resourcemanager
+docker build -t apachekylin/kylin-hadoop-nodemanager:hadoop_${HADOOP_VERSION} --build-arg HADOOP_VERSION=${HADOOP_VERSION} ./dockerfile/cluster/nodemanager
+docker build -t apachekylin/kylin-hadoop-historyserver:hadoop_${HADOOP_VERSION} --build-arg HADOOP_VERSION=${HADOOP_VERSION} ./dockerfile/cluster/historyserver
+
+docker build -t apachekylin/kylin-hive:hive_${HIVE_VERSION}_hadoop_${HADOOP_VERSION} \
+--build-arg HIVE_VERSION=${HIVE_VERSION} \
+--build-arg HADOOP_VERSION=${HADOOP_VERSION} \
+./dockerfile/cluster/hive
+
+docker build -t apachekylin/kylin-hbase-base:hbase_${HBASE_VERSION} --build-arg HBASE_VERSION=${HBASE_VERSION} ./dockerfile/cluster/hbase
+docker build -t apachekylin/kylin-hbase-master:hbase_${HBASE_VERSION} --build-arg HBASE_VERSION=${HBASE_VERSION} ./dockerfile/cluster/hmaster
+docker build -t apachekylin/kylin-hbase-regionserver:hbase_${HBASE_VERSION} --build-arg HBASE_VERSION=${HBASE_VERSION} ./dockerfile/cluster/hregionserver
+
+docker build -t apachekylin/kylin-kerberos:latest ./dockerfile/cluster/kerberos
+
+docker build -t apachekylin/kylin-client:hadoop_${HADOOP_VERSION}_hive_${HIVE_VERSION}_hbase_${HBASE_VERSION} \
+--build-arg HIVE_VERSION=${HIVE_VERSION} \
+--build-arg HADOOP_VERSION=${HADOOP_VERSION} \
+--build-arg HBASE_VERSION=${HBASE_VERSION} \
+./dockerfile/cluster/client
+
+
+export HADOOP_BASE_IMAGETAG=apachekylin/kylin-hadoop-base:hadoop_${HADOOP_VERSION}
+export HADOOP_DATANODE_IMAGETAG=apachekylin/kylin-hadoop-datanode:hadoop_${HADOOP_VERSION}
+export HADOOP_NAMENODE_IMAGETAG=apachekylin/kylin-hadoop-namenode:hadoop_${HADOOP_VERSION}
+export HADOOP_RESOURCEMANAGER_IMAGETAG=apachekylin/kylin-hadoop-resourcemanager:hadoop_${HADOOP_VERSION}
+export HADOOP_NODEMANAGER_IMAGETAG=apachekylin/kylin-hadoop-nodemanager:hadoop_${HADOOP_VERSION}
+export HADOOP_HISTORYSERVER_IMAGETAG=apachekylin/kylin-hadoop-historyserver:hadoop_${HADOOP_VERSION}
+export HIVE_IMAGETAG=apachekylin/kylin-hive:hive_${HIVE_VERSION}_hadoop_${HADOOP_VERSION}
+export HBASE_BASE_IMAGETAG=apachekylin/kylin-hbase-base:hbase_${HBASE_VERSION}
+export HBASE_MASTER_IMAGETAG=apachekylin/kylin-hbase-master:hbase_${HBASE_VERSION}
+export HBASE_REGIONSERVER_IMAGETAG=apachekylin/kylin-hbase-regionserver:hbase_${HBASE_VERSION}
+export CLIENT_IMAGETAG=apachekylin/kylin-client:hadoop_${HADOOP_VERSION}_hive_${HIVE_VERSION}_hbase_${HBASE_VERSION}
+export KERBEROS_IMAGE=apachekylin/kylin-kerberos:latest
+
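For reference, a typical invocation of the script above, run from the docker/
directory (unset flags fall back to the defaults declared at the top; a sketch,
assuming a local Docker daemon):

    cd docker
    bash build_cluster_images.sh --hadoop_version 2.8.5 --hive_version 1.2.2 --hbase_version 1.1.2

Note that the trailing exports only reach the calling shell if the script is
sourced (". build_cluster_images.sh"); running it as above leaves the
*_IMAGETAG variables set only inside the script's subshell.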
diff --git a/docker/build_image.sh b/docker/build_standalone_image.sh
similarity index 100%
copy from docker/build_image.sh
copy to docker/build_standalone_image.sh
diff --git a/docker/docker-compose/others/docker-compose-kerberos.yml b/docker/docker-compose/others/docker-compose-kerberos.yml
new file mode 100644
index 0000000..3d90062
--- /dev/null
+++ b/docker/docker-compose/others/docker-compose-kerberos.yml
@@ -0,0 +1,13 @@
+version: "3.3"
+
+services:
+  kerberos-kdc:
+    image: ${KERBEROS_IMAGE}
+    container_name: kerberos-kdc
+    hostname: kerberos-kdc
+    networks:
+      - write_kylin
+
+networks:
+  write_kylin:
+    external: true
\ No newline at end of file
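Because the KDC joins the external write_kylin network, that network has to
exist and KERBEROS_IMAGE has to be set before this file is used (a minimal
sketch, assuming the kerberos image built by build_cluster_images.sh):

    docker network create write_kylin   # skip if the write cluster already created it
    export KERBEROS_IMAGE=apachekylin/kylin-kerberos:latest
    docker-compose -f docker/docker-compose/others/docker-compose-kerberos.yml up -d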
diff --git a/docker/conf/hadoop/core-site.xml b/docker/docker-compose/read/conf/hadoop-read/core-site.xml
similarity index 63%
copy from docker/conf/hadoop/core-site.xml
copy to docker/docker-compose/read/conf/hadoop-read/core-site.xml
index 6fe6404..69fc462 100644
--- a/docker/conf/hadoop/core-site.xml
+++ b/docker/docker-compose/read/conf/hadoop-read/core-site.xml
@@ -17,13 +17,9 @@
 <!-- Put site-specific property overrides in this file. -->
 
 <configuration>
-    <property>
-        <name>hadoop.tmp.dir</name>
-        <value>/data/hadoop</value>
-        <description>Abase for other temporary directories.</description>
-    </property>
-    <property>
-        <name>fs.defaultFS</name>
-        <value>hdfs://localhost:9000</value>
-    </property>
+<property><name>hadoop.proxyuser.hue.hosts</name><value>*</value></property>
+<property><name>fs.defaultFS</name><value>hdfs://write-namenode:8020</value></property>
+<property><name>io.compression.codecs</name><value>org.apache.hadoop.io.compress.SnappyCodec</value></property>
+<property><name>hadoop.proxyuser.hue.groups</name><value>*</value></property>
+<property><name>hadoop.http.staticuser.user</name><value>root</value></property>
 </configuration>
diff --git a/docker/docker-compose/read/conf/hadoop-read/hdfs-site.xml b/docker/docker-compose/read/conf/hadoop-read/hdfs-site.xml
new file mode 100644
index 0000000..cdf7778
--- /dev/null
+++ b/docker/docker-compose/read/conf/hadoop-read/hdfs-site.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<property><name>dfs.namenode.name.dir</name><value>file:///hadoop/dfs/name</value></property>
+<property><name>dfs.namenode.datanode.registration.ip-hostname-check</name><value>false</value></property>
+<property><name>dfs.permissions.enabled</name><value>false</value></property>
+<property><name>dfs.webhdfs.enabled</name><value>true</value></property>
+<property><name>dfs.namenode.rpc-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.namenode.servicerpc-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.namenode.http-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.namenode.https-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.client.use.datanode.hostname</name><value>true</value></property>
+<property><name>dfs.datanode.use.datanode.hostname</name><value>true</value></property>
+</configuration>
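These settings follow the usual pattern for HDFS in containers: the daemons
bind to 0.0.0.0 so they accept traffic on any container interface, while
dfs.client.use.datanode.hostname and dfs.datanode.use.datanode.hostname make
peers dial each other by hostname (resolvable on the compose network) rather
than by internal IP. With dfs.webhdfs.enabled=true the namenode can be
smoke-tested over HTTP (a sketch, assuming Hadoop 2.x's default web UI port
50070; substitute the namenode host from the matching core-site.xml):

    curl "http://<namenode-host>:50070/webhdfs/v1/?op=LISTSTATUS"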
diff --git a/docker/conf/hadoop/core-site.xml b/docker/docker-compose/read/conf/hadoop-read/mapred-site.xml
similarity index 69%
copy from docker/conf/hadoop/core-site.xml
copy to docker/docker-compose/read/conf/hadoop-read/mapred-site.xml
index 6fe6404..d5cc450 100644
--- a/docker/conf/hadoop/core-site.xml
+++ b/docker/docker-compose/read/conf/hadoop-read/mapred-site.xml
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="UTF-8"?>
+<?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 <!--
   Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,13 +17,6 @@
 <!-- Put site-specific property overrides in this file. -->
 
 <configuration>
-    <property>
-        <name>hadoop.tmp.dir</name>
-        <value>/data/hadoop</value>
-        <description>Abase for other temporary directories.</description>
-    </property>
-    <property>
-        <name>fs.defaultFS</name>
-        <value>hdfs://localhost:9000</value>
-    </property>
+
+<property><name>yarn.nodemanager.bind-host</name><value>0.0.0.0</value></property>
 </configuration>
diff --git a/docker/docker-compose/read/conf/hadoop-read/yarn-site.xml b/docker/docker-compose/read/conf/hadoop-read/yarn-site.xml
new file mode 100644
index 0000000..392cf4c
--- /dev/null
+++ b/docker/docker-compose/read/conf/hadoop-read/yarn-site.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<configuration>
+
+<!-- Site specific YARN configuration properties -->
+
+<property><name>yarn.resourcemanager.fs.state-store.uri</name><value>/rmstate</value></property>
+<property><name>yarn.timeline-service.generic-application-history.enabled</name><value>true</value></property>
+<property><name>mapreduce.map.output.compress</name><value>true</value></property>
+<property><name>yarn.resourcemanager.recovery.enabled</name><value>true</value></property>
+<property><name>mapred.map.output.compress.codec</name><value>org.apache.hadoop.io.compress.SnappyCodec</value></property>
+<property><name>yarn.timeline-service.enabled</name><value>true</value></property>
+<property><name>yarn.log-aggregation-enable</name><value>true</value></property>
+<property><name>yarn.resourcemanager.store.class</name><value>org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore</value></property>
+<property><name>yarn.resourcemanager.system-metrics-publisher.enabled</name><value>true</value></property>
+<property><name>yarn.nodemanager.remote-app-log-dir</name><value>/app-logs</value></property>
+<property><name>yarn.nodemanager.aux-services</name><value>mapreduce_shuffle</value></property>
+<property><name>yarn.resourcemanager.resource_tracker.address</name><value>read-resourcemanager:8031</value></property>
+<property><name>yarn.resourcemanager.hostname</name><value>read-resourcemanager</value></property>
+<property><name>yarn.scheduler.capacity.root.default.maximum-allocation-vcores</name><value>4</value></property>
+<property><name>yarn.timeline-service.hostname</name><value>read-historyserver</value></property>
+<property><name>yarn.scheduler.capacity.root.default.maximum-allocation-mb</name><value>8192</value></property>
+<property><name>yarn.log.server.url</name><value>http://read-historyserver:8188/applicationhistory/logs/</value></property>
+<property><name>yarn.resourcemanager.scheduler.class</name><value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value></property>
+<property><name>yarn.resourcemanager.scheduler.address</name><value>read-resourcemanager:8030</value></property>
+<property><name>yarn.resourcemanager.address</name><value>read-resourcemanager:8032</value></property>
+<property><name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name><value>98.5</value></property>
+<property><name>yarn.nodemanager.resource.memory-mb</name><value>16384</value></property>
+<property><name>yarn.nodemanager.resource.cpu-vcores</name><value>8</value></property>
+<property><name>yarn.resourcemanager.bind-host</name><value>0.0.0.0</value></property>
+<property><name>yarn.nodemanager.bind-host</name><value>0.0.0.0</value></property>
+<property><name>yarn.nodemanager.bind-host</name><value>0.0.0.0</value></property>
+<property><name>yarn.timeline-service.bind-host</name><value>0.0.0.0</value></property>
+</configuration>
diff --git a/docker/conf/hadoop/core-site.xml b/docker/docker-compose/read/conf/hadoop/core-site.xml
similarity index 63%
copy from docker/conf/hadoop/core-site.xml
copy to docker/docker-compose/read/conf/hadoop/core-site.xml
index 6fe6404..dd5a81b 100644
--- a/docker/conf/hadoop/core-site.xml
+++ b/docker/docker-compose/read/conf/hadoop/core-site.xml
@@ -17,13 +17,10 @@
 <!-- Put site-specific property overrides in this file. -->
 
 <configuration>
-    <property>
-        <name>hadoop.tmp.dir</name>
-        <value>/data/hadoop</value>
-        <description>Abase for other temporary directories.</description>
-    </property>
-    <property>
-        <name>fs.defaultFS</name>
-        <value>hdfs://localhost:9000</value>
-    </property>
+<property><name>hadoop.proxyuser.hue.hosts</name><value>*</value></property>
+<property><name>fs.defaultFS</name><value>hdfs://write-namenode:8020</value></property>
+<property><name>io.compression.codecs</name><value>org.apache.hadoop.io.compress.SnappyCodec</value></property>
+<property><name>hadoop.proxyuser.hue.groups</name><value>*</value></property>
+<property><name>hadoop.http.staticuser.user</name><value>root</value></property>
+
 </configuration>
diff --git a/docker/docker-compose/read/conf/hadoop/hdfs-site.xml b/docker/docker-compose/read/conf/hadoop/hdfs-site.xml
new file mode 100644
index 0000000..cdf7778
--- /dev/null
+++ b/docker/docker-compose/read/conf/hadoop/hdfs-site.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<property><name>dfs.namenode.name.dir</name><value>file:///hadoop/dfs/name</value></property>
+<property><name>dfs.namenode.datanode.registration.ip-hostname-check</name><value>false</value></property>
+<property><name>dfs.permissions.enabled</name><value>false</value></property>
+<property><name>dfs.webhdfs.enabled</name><value>true</value></property>
+<property><name>dfs.namenode.rpc-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.namenode.servicerpc-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.namenode.http-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.namenode.https-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.client.use.datanode.hostname</name><value>true</value></property>
+<property><name>dfs.datanode.use.datanode.hostname</name><value>true</value></property>
+</configuration>
diff --git a/docker/conf/hadoop/core-site.xml b/docker/docker-compose/read/conf/hadoop/mapred-site.xml
similarity index 69%
copy from docker/conf/hadoop/core-site.xml
copy to docker/docker-compose/read/conf/hadoop/mapred-site.xml
index 6fe6404..d5cc450 100644
--- a/docker/conf/hadoop/core-site.xml
+++ b/docker/docker-compose/read/conf/hadoop/mapred-site.xml
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="UTF-8"?>
+<?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 <!--
   Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,13 +17,6 @@
 <!-- Put site-specific property overrides in this file. -->
 
 <configuration>
-    <property>
-        <name>hadoop.tmp.dir</name>
-        <value>/data/hadoop</value>
-        <description>Abase for other temporary directories.</description>
-    </property>
-    <property>
-        <name>fs.defaultFS</name>
-        <value>hdfs://localhost:9000</value>
-    </property>
+
+<property><name>yarn.nodemanager.bind-host</name><value>0.0.0.0</value></property>
 </configuration>
diff --git a/docker/docker-compose/read/conf/hadoop/yarn-site.xml b/docker/docker-compose/read/conf/hadoop/yarn-site.xml
new file mode 100644
index 0000000..b55dd34
--- /dev/null
+++ b/docker/docker-compose/read/conf/hadoop/yarn-site.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<configuration>
+
+<!-- Site specific YARN configuration properties -->
+
+<property><name>yarn.resourcemanager.fs.state-store.uri</name><value>/rmstate</value></property>
+<property><name>yarn.timeline-service.generic-application-history.enabled</name><value>true</value></property>
+<property><name>mapreduce.map.output.compress</name><value>true</value></property>
+<property><name>yarn.resourcemanager.recovery.enabled</name><value>true</value></property>
+<property><name>mapred.map.output.compress.codec</name><value>org.apache.hadoop.io.compress.SnappyCodec</value></property>
+<property><name>yarn.timeline-service.enabled</name><value>true</value></property>
+<property><name>yarn.log-aggregation-enable</name><value>true</value></property>
+<property><name>yarn.resourcemanager.store.class</name><value>org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore</value></property>
+<property><name>yarn.resourcemanager.system-metrics-publisher.enabled</name><value>true</value></property>
+<property><name>yarn.nodemanager.remote-app-log-dir</name><value>/app-logs</value></property>
+<property><name>yarn.nodemanager.aux-services</name><value>mapreduce_shuffle</value></property>
+<property><name>yarn.resourcemanager.resource_tracker.address</name><value>write-resourcemanager:8031</value></property>
+<property><name>yarn.resourcemanager.hostname</name><value>write-resourcemanager</value></property>
+<property><name>yarn.scheduler.capacity.root.default.maximum-allocation-vcores</name><value>4</value></property>
+<property><name>yarn.timeline-service.hostname</name><value>write-historyserver</value></property>
+<property><name>yarn.scheduler.capacity.root.default.maximum-allocation-mb</name><value>8192</value></property>
+<property><name>yarn.log.server.url</name><value>http://write-historyserver:8188/applicationhistory/logs/</value></property>
+<property><name>yarn.resourcemanager.scheduler.class</name><value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value></property>
+<property><name>yarn.resourcemanager.scheduler.address</name><value>write-resourcemanager:8030</value></property>
+<property><name>yarn.resourcemanager.address</name><value>write-resourcemanager:8032</value></property>
+<property><name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name><value>98.5</value></property>
+<property><name>yarn.nodemanager.resource.memory-mb</name><value>16384</value></property>
+<property><name>yarn.nodemanager.resource.cpu-vcores</name><value>8</value></property>
+<property><name>yarn.resourcemanager.bind-host</name><value>0.0.0.0</value></property>
+<property><name>yarn.nodemanager.bind-host</name><value>0.0.0.0</value></property>
+<property><name>yarn.nodemanager.bind-host</name><value>0.0.0.0</value></property>
+<property><name>yarn.timeline-service.bind-host</name><value>0.0.0.0</value></property>
+</configuration>
diff --git a/docker/docker-compose/read/conf/hbase/hbase-site.xml b/docker/docker-compose/read/conf/hbase/hbase-site.xml
new file mode 100644
index 0000000..988d91c
--- /dev/null
+++ b/docker/docker-compose/read/conf/hbase/hbase-site.xml
@@ -0,0 +1,34 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+<property><name>hbase.zookeeper.quorum</name><value>read-zookeeper</value></property>
+<property><name>hbase.master</name><value>read-hbase-master:16000</value></property>
+<property><name>hbase.regionserver.port</name><value>16020</value></property>
+<property><name>hbase.regionserver.info.port</name><value>16030</value></property>
+<property><name>DIR</name><value>/etc/hbase</value></property>
+<property><name>hbase.cluster.distributed</name><value>true</value></property>
+<property><name>hbase.rootdir</name><value>hdfs://read-namenode:8020/hbase</value></property>
+<property><name>hbase.master.info.port</name><value>16010</value></property>
+<property><name>hbase.master.hostname</name><value>read-hbase-master</value></property>
+<property><name>hbase.master.port</name><value>16000</value></property>
+</configuration>
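This pins the read HBase cluster to its own ZooKeeper quorum and namenode and
fixes the master and regionserver ports so they stay predictable across
container restarts. A quick non-interactive health check (a sketch, assuming
the master container is named read-hbase-master as in the env files below):

    docker exec read-hbase-master bash -c 'echo status | hbase shell'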
diff --git a/docker/docker-compose/read/conf/hive/hive-site.xml b/docker/docker-compose/read/conf/hive/hive-site.xml
new file mode 100644
index 0000000..c60fe36
--- /dev/null
+++ b/docker/docker-compose/read/conf/hive/hive-site.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?><!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+--><configuration>
+    <property><name>hive.metastore.uris</name><value>thrift://write-hive-metastore:9083</value></property>
+    <property><name>datanucleus.autoCreateSchema</name><value>false</value></property>
+    <property><name>javax.jdo.option.ConnectionURL</name><value>jdbc:postgresql://write-hive-metastore-postgresql/metastore</value></property>
+    <property><name>javax.jdo.option.ConnectionDriverName</name><value>org.postgresql.Driver</value></property>
+    <property><name>javax.jdo.option.ConnectionPassword</name><value>hive</value></property>
+    <property><name>javax.jdo.option.ConnectionUserName</name><value>hive</value></property>
+</configuration>
+
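Note that the read side does not run its own metastore: hive.metastore.uris
points at thrift://write-hive-metastore:9083, so the read and write clusters
share one catalog, and with datanucleus.autoCreateSchema=false the schema is
expected to exist already. A sketch of verifying the shared catalog from any
container with a Hive client and this hive-site.xml on its classpath
(hypothetical container name):

    docker exec -it kylin-client hive -e 'show databases;'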
diff --git a/docker/docker-compose/read/docker-compose-zookeeper.yml b/docker/docker-compose/read/docker-compose-zookeeper.yml
new file mode 100644
index 0000000..71ea252
--- /dev/null
+++ b/docker/docker-compose/read/docker-compose-zookeeper.yml
@@ -0,0 +1,18 @@
+version: "3.3"
+
+services:
+  read-zookeeper:
+    image: ${ZOOKEEPER_IMAGETAG:-zookeeper:3.4.10}
+    container_name: read-zookeeper
+    hostname: read-zookeeper
+    environment:
+      ZOO_MY_ID: 1
+      ZOO_SERVERS: server.1=0.0.0.0:2888:3888
+    networks:
+      - write_kylin
+    ports:
+      - 2182:2181
+
+networks:
+  write_kylin:
+    external: true
\ No newline at end of file
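The read ZooKeeper reuses the write cluster's network but remaps client port
2181 to host port 2182 to avoid colliding with a write-side ensemble. Two
quick checks (assuming the stock zookeeper:3.4.10 image, which answers
four-letter commands):

    echo ruok | nc localhost 2182                                    # from the host; expect "imok"
    docker exec -it read-zookeeper zkCli.sh -server localhost:2181   # interactive shell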
diff --git a/docker/docker-compose/read/read-hadoop.env b/docker/docker-compose/read/read-hadoop.env
new file mode 100644
index 0000000..9c0086d
--- /dev/null
+++ b/docker/docker-compose/read/read-hadoop.env
@@ -0,0 +1,40 @@
+CORE_CONF_fs_defaultFS=hdfs://read-namenode:8020
+CORE_CONF_hadoop_http_staticuser_user=root
+CORE_CONF_hadoop_proxyuser_hue_hosts=*
+CORE_CONF_hadoop_proxyuser_hue_groups=*
+CORE_CONF_io_compression_codecs=org.apache.hadoop.io.compress.SnappyCodec
+
+HDFS_CONF_dfs_webhdfs_enabled=true
+HDFS_CONF_dfs_permissions_enabled=false
+HDFS_CONF_dfs_namenode_datanode_registration_ip___hostname___check=false
+
+YARN_CONF_yarn_log___aggregation___enable=true
+YARN_CONF_yarn_log_server_url=http://read-historyserver:8188/applicationhistory/logs/
+YARN_CONF_yarn_resourcemanager_recovery_enabled=true
+YARN_CONF_yarn_resourcemanager_store_class=org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore
+YARN_CONF_yarn_resourcemanager_scheduler_class=org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler
+YARN_CONF_yarn_scheduler_capacity_root_default_maximum___allocation___mb=8192
+YARN_CONF_yarn_scheduler_capacity_root_default_maximum___allocation___vcores=4
+YARN_CONF_yarn_resourcemanager_fs_state___store_uri=/rmstate
+YARN_CONF_yarn_resourcemanager_system___metrics___publisher_enabled=true
+YARN_CONF_yarn_resourcemanager_hostname=read-resourcemanager
+YARN_CONF_yarn_resourcemanager_address=read-resourcemanager:8032
+YARN_CONF_yarn_resourcemanager_scheduler_address=read-resourcemanager:8030
+YARN_CONF_yarn_resourcemanager_resource__tracker_address=read-resourcemanager:8031
+YARN_CONF_yarn_timeline___service_enabled=true
+YARN_CONF_yarn_timeline___service_generic___application___history_enabled=true
+YARN_CONF_yarn_timeline___service_hostname=read-historyserver
+YARN_CONF_mapreduce_map_output_compress=true
+YARN_CONF_mapred_map_output_compress_codec=org.apache.hadoop.io.compress.SnappyCodec
+YARN_CONF_yarn_nodemanager_resource_memory___mb=16384
+YARN_CONF_yarn_nodemanager_resource_cpu___vcores=8
+YARN_CONF_yarn_nodemanager_disk___health___checker_max___disk___utilization___per___disk___percentage=98.5
+YARN_CONF_yarn_nodemanager_remote___app___log___dir=/app-logs
+YARN_CONF_yarn_nodemanager_aux___services=mapreduce_shuffle
+
+MAPRED_CONF_mapreduce_framework_name=yarn
+MAPRED_CONF_mapred_child_java_opts=-Xmx4096m
+MAPRED_CONF_mapreduce_map_memory_mb=4096
+MAPRED_CONF_mapreduce_reduce_memory_mb=8192
+MAPRED_CONF_mapreduce_map_java_opts=-Xmx3072m
+MAPRED_CONF_mapreduce_reduce_java_opts=-Xmx6144m
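The *.env files above rely on the naming convention of the entrypoint these
Hadoop images use (stated here as an assumption, matching the convention of
the big-data-europe Hadoop images): the prefix (CORE_CONF_, HDFS_CONF_,
YARN_CONF_, MAPRED_CONF_) selects the target *-site.xml, and in the remainder
a triple underscore becomes a dash, a double underscore a literal underscore,
and a single underscore a dot. A shell sketch of the mapping:

    key="YARN_CONF_yarn_resourcemanager_resource__tracker_address"
    name="${key#YARN_CONF_}"
    # ___ -> dash, __ -> literal underscore (via placeholder), _ -> dot
    name=$(echo "$name" | sed -e 's/___/-/g' -e 's/__/@/g' -e 's/_/./g' -e 's/@/_/g')
    echo "$name"   # -> yarn.resourcemanager.resource_tracker.address

This is why yarn.log-aggregation-enable is spelled
YARN_CONF_yarn_log___aggregation___enable above.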
diff --git a/docker/docker-compose/read/read-hbase-distributed-local.env b/docker/docker-compose/read/read-hbase-distributed-local.env
new file mode 100644
index 0000000..4ba8e19
--- /dev/null
+++ b/docker/docker-compose/read/read-hbase-distributed-local.env
@@ -0,0 +1,12 @@
+HBASE_CONF_hbase_rootdir=hdfs://read-namenode:8020/hbase
+HBASE_CONF_hbase_cluster_distributed=true
+HBASE_CONF_hbase_zookeeper_quorum=read-zookeeper
+
+HBASE_CONF_hbase_master=read-hbase-master:16000
+HBASE_CONF_hbase_master_hostname=read-hbase-master
+HBASE_CONF_hbase_master_port=16000
+HBASE_CONF_hbase_master_info_port=16010
+HBASE_CONF_hbase_regionserver_port=16020
+HBASE_CONF_hbase_regionserver_info_port=16030
+
+HBASE_MANAGES_ZK=false
\ No newline at end of file
diff --git a/docker/docker-compose/write-read/client.env b/docker/docker-compose/write-read/client.env
new file mode 100644
index 0000000..fc0743c
--- /dev/null
+++ b/docker/docker-compose/write-read/client.env
@@ -0,0 +1,61 @@
+CORE_CONF_fs_defaultFS=hdfs://write-namenode:8020
+CORE_CONF_hadoop_http_staticuser_user=root
+CORE_CONF_hadoop_proxyuser_hue_hosts=*
+CORE_CONF_hadoop_proxyuser_hue_groups=*
+CORE_CONF_io_compression_codecs=org.apache.hadoop.io.compress.SnappyCodec
+
+HDFS_CONF_dfs_webhdfs_enabled=true
+HDFS_CONF_dfs_permissions_enabled=false
+HDFS_CONF_dfs_namenode_datanode_registration_ip___hostname___check=false
+
+YARN_CONF_yarn_log___aggregation___enable=true
+YARN_CONF_yarn_log_server_url=http://write-historyserver:8188/applicationhistory/logs/
+YARN_CONF_yarn_resourcemanager_recovery_enabled=true
+YARN_CONF_yarn_resourcemanager_store_class=org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore
+YARN_CONF_yarn_resourcemanager_scheduler_class=org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler
+YARN_CONF_yarn_scheduler_capacity_root_default_maximum___allocation___mb=8192
+YARN_CONF_yarn_scheduler_capacity_root_default_maximum___allocation___vcores=4
+YARN_CONF_yarn_resourcemanager_fs_state___store_uri=/rmstate
+YARN_CONF_yarn_resourcemanager_system___metrics___publisher_enabled=true
+YARN_CONF_yarn_resourcemanager_hostname=write-resourcemanager
+YARN_CONF_yarn_resourcemanager_address=write-resourcemanager:8032
+YARN_CONF_yarn_resourcemanager_scheduler_address=write-resourcemanager:8030
+YARN_CONF_yarn_resourcemanager_resource__tracker_address=write-resourcemanager:8031
+YARN_CONF_yarn_timeline___service_enabled=true
+YARN_CONF_yarn_timeline___service_generic___application___history_enabled=true
+YARN_CONF_yarn_timeline___service_hostname=write-historyserver
+YARN_CONF_mapreduce_map_output_compress=true
+YARN_CONF_mapred_map_output_compress_codec=org.apache.hadoop.io.compress.SnappyCodec
+YARN_CONF_yarn_nodemanager_resource_memory___mb=16384
+YARN_CONF_yarn_nodemanager_resource_cpu___vcores=8
+YARN_CONF_yarn_nodemanager_disk___health___checker_max___disk___utilization___per___disk___percentage=98.5
+YARN_CONF_yarn_nodemanager_remote___app___log___dir=/app-logs
+YARN_CONF_yarn_nodemanager_aux___services=mapreduce_shuffle
+
+MAPRED_CONF_mapreduce_framework_name=yarn
+MAPRED_CONF_mapred_child_java_opts=-Xmx4096m
+MAPRED_CONF_mapreduce_map_memory_mb=4096
+MAPRED_CONF_mapreduce_reduce_memory_mb=8192
+MAPRED_CONF_mapreduce_map_java_opts=-Xmx3072m
+MAPRED_CONF_mapreduce_reduce_java_opts=-Xmx6144m
+
+HIVE_SITE_CONF_javax_jdo_option_ConnectionURL=jdbc:mysql://metastore-db/metastore
+HIVE_SITE_CONF_javax_jdo_option_ConnectionDriverName=com.mysql.jdbc.Driver
+HIVE_SITE_CONF_javax_jdo_option_ConnectionUserName=kylin
+HIVE_SITE_CONF_javax_jdo_option_ConnectionPassword=kylin
+HIVE_SITE_CONF_datanucleus_autoCreateSchema=true
+HIVE_SITE_CONF_hive_metastore_uris=thrift://write-hive-metastore:9083
+
+HBASE_CONF_hbase_rootdir=hdfs://read-namenode:8020/hbase
+HBASE_CONF_hbase_cluster_distributed=true
+HBASE_CONF_hbase_zookeeper_quorum=read-zookeeper
+
+HBASE_CONF_hbase_master=read-hbase-master:16000
+HBASE_CONF_hbase_master_hostname=read-hbase-master
+HBASE_CONF_hbase_master_port=16000
+HBASE_CONF_hbase_master_info_port=16010
+HBASE_CONF_hbase_regionserver_port=16020
+HBASE_CONF_hbase_regionserver_info_port=16030
+
+HBASE_MANAGES_ZK=false
+
diff --git a/docker/docker-compose/write-read/test-docker-compose-mysql.yml b/docker/docker-compose/write-read/test-docker-compose-mysql.yml
new file mode 100644
index 0000000..5906c1e
--- /dev/null
+++ b/docker/docker-compose/write-read/test-docker-compose-mysql.yml
@@ -0,0 +1,16 @@
+
+version: "3.3"
+
+services:
+  metastore-db:
+    image: mysql:5.6.49
+    container_name: metastore-db
+    hostname: metastore-db
+    volumes:
+      - ./data/mysql:/var/lib/mysql
+    environment:
+      - MYSQL_ROOT_PASSWORD=kylin
+      - MYSQL_DATABASE=kylin
+      - MYSQL_USER=kylin
+      - MYSQL_PASSWORD=kylin
+
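This compose file provides the MySQL database that client.env points Hive at,
persisting data under ./data/mysql on the host. One caveat worth checking: the
environment block creates a database named kylin, while the JDBC URL in
client.env is jdbc:mysql://metastore-db/metastore, so the metastore schema
still has to be created (or the URL adjusted) before the Hive metastore can
start. A quick login check with the credentials above:

    docker-compose -f test-docker-compose-mysql.yml up -d
    docker exec -it metastore-db mysql -ukylin -pkylin -e 'show databases;'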
diff --git a/docker/docker-compose/write/client.env b/docker/docker-compose/write/client.env
new file mode 100644
index 0000000..fc0743c
--- /dev/null
+++ b/docker/docker-compose/write/client.env
@@ -0,0 +1,61 @@
+CORE_CONF_fs_defaultFS=hdfs://write-namenode:8020
+CORE_CONF_hadoop_http_staticuser_user=root
+CORE_CONF_hadoop_proxyuser_hue_hosts=*
+CORE_CONF_hadoop_proxyuser_hue_groups=*
+CORE_CONF_io_compression_codecs=org.apache.hadoop.io.compress.SnappyCodec
+
+HDFS_CONF_dfs_webhdfs_enabled=true
+HDFS_CONF_dfs_permissions_enabled=false
+HDFS_CONF_dfs_namenode_datanode_registration_ip___hostname___check=false
+
+YARN_CONF_yarn_log___aggregation___enable=true
+YARN_CONF_yarn_log_server_url=http://write-historyserver:8188/applicationhistory/logs/
+YARN_CONF_yarn_resourcemanager_recovery_enabled=true
+YARN_CONF_yarn_resourcemanager_store_class=org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore
+YARN_CONF_yarn_resourcemanager_scheduler_class=org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler
+YARN_CONF_yarn_scheduler_capacity_root_default_maximum___allocation___mb=8192
+YARN_CONF_yarn_scheduler_capacity_root_default_maximum___allocation___vcores=4
+YARN_CONF_yarn_resourcemanager_fs_state___store_uri=/rmstate
+YARN_CONF_yarn_resourcemanager_system___metrics___publisher_enabled=true
+YARN_CONF_yarn_resourcemanager_hostname=write-resourcemanager
+YARN_CONF_yarn_resourcemanager_address=write-resourcemanager:8032
+YARN_CONF_yarn_resourcemanager_scheduler_address=write-resourcemanager:8030
+YARN_CONF_yarn_resourcemanager_resource__tracker_address=write-resourcemanager:8031
+YARN_CONF_yarn_timeline___service_enabled=true
+YARN_CONF_yarn_timeline___service_generic___application___history_enabled=true
+YARN_CONF_yarn_timeline___service_hostname=write-historyserver
+YARN_CONF_mapreduce_map_output_compress=true
+YARN_CONF_mapred_map_output_compress_codec=org.apache.hadoop.io.compress.SnappyCodec
+YARN_CONF_yarn_nodemanager_resource_memory___mb=16384
+YARN_CONF_yarn_nodemanager_resource_cpu___vcores=8
+YARN_CONF_yarn_nodemanager_disk___health___checker_max___disk___utilization___per___disk___percentage=98.5
+YARN_CONF_yarn_nodemanager_remote___app___log___dir=/app-logs
+YARN_CONF_yarn_nodemanager_aux___services=mapreduce_shuffle
+
+MAPRED_CONF_mapreduce_framework_name=yarn
+MAPRED_CONF_mapred_child_java_opts=-Xmx4096m
+MAPRED_CONF_mapreduce_map_memory_mb=4096
+MAPRED_CONF_mapreduce_reduce_memory_mb=8192
+MAPRED_CONF_mapreduce_map_java_opts=-Xmx3072m
+MAPRED_CONF_mapreduce_reduce_java_opts=-Xmx6144m
+
+HIVE_SITE_CONF_javax_jdo_option_ConnectionURL=jdbc:mysql://metastore-db/metastore
+HIVE_SITE_CONF_javax_jdo_option_ConnectionDriverName=com.mysql.jdbc.Driver
+HIVE_SITE_CONF_javax_jdo_option_ConnectionUserName=kylin
+HIVE_SITE_CONF_javax_jdo_option_ConnectionPassword=kylin
+HIVE_SITE_CONF_datanucleus_autoCreateSchema=true
+HIVE_SITE_CONF_hive_metastore_uris=thrift://write-hive-metastore:9083
+
+HBASE_CONF_hbase_rootdir=hdfs://read-namenode:8020/hbase
+HBASE_CONF_hbase_cluster_distributed=true
+HBASE_CONF_hbase_zookeeper_quorum=read-zookeeper
+
+HBASE_CONF_hbase_master=read-hbase-master:16000
+HBASE_CONF_hbase_master_hostname=read-hbase-master
+HBASE_CONF_hbase_master_port=16000
+HBASE_CONF_hbase_master_info_port=16010
+HBASE_CONF_hbase_regionserver_port=16020
+HBASE_CONF_hbase_regionserver_info_port=16030
+
+HBASE_MANAGES_ZK=false
+
diff --git a/docker/conf/hadoop/core-site.xml b/docker/docker-compose/write/conf/hadoop-read/core-site.xml
similarity index 63%
copy from docker/conf/hadoop/core-site.xml
copy to docker/docker-compose/write/conf/hadoop-read/core-site.xml
index 6fe6404..69fc462 100644
--- a/docker/conf/hadoop/core-site.xml
+++ b/docker/docker-compose/write/conf/hadoop-read/core-site.xml
@@ -17,13 +17,9 @@
 <!-- Put site-specific property overrides in this file. -->
 
 <configuration>
-    <property>
-        <name>hadoop.tmp.dir</name>
-        <value>/data/hadoop</value>
-        <description>Abase for other temporary directories.</description>
-    </property>
-    <property>
-        <name>fs.defaultFS</name>
-        <value>hdfs://localhost:9000</value>
-    </property>
+<property><name>hadoop.proxyuser.hue.hosts</name><value>*</value></property>
+<property><name>fs.defaultFS</name><value>hdfs://write-namenode:8020</value></property>
+<property><name>io.compression.codecs</name><value>org.apache.hadoop.io.compress.SnappyCodec</value></property>
+<property><name>hadoop.proxyuser.hue.groups</name><value>*</value></property>
+<property><name>hadoop.http.staticuser.user</name><value>root</value></property>
 </configuration>
diff --git a/docker/docker-compose/write/conf/hadoop-read/hdfs-site.xml b/docker/docker-compose/write/conf/hadoop-read/hdfs-site.xml
new file mode 100644
index 0000000..cdf7778
--- /dev/null
+++ b/docker/docker-compose/write/conf/hadoop-read/hdfs-site.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<property><name>dfs.namenode.name.dir</name><value>file:///hadoop/dfs/name</value></property>
+<property><name>dfs.namenode.datanode.registration.ip-hostname-check</name><value>false</value></property>
+<property><name>dfs.permissions.enabled</name><value>false</value></property>
+<property><name>dfs.webhdfs.enabled</name><value>true</value></property>
+<property><name>dfs.namenode.rpc-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.namenode.servicerpc-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.namenode.http-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.namenode.https-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.client.use.datanode.hostname</name><value>true</value></property>
+<property><name>dfs.datanode.use.datanode.hostname</name><value>true</value></property>
+</configuration>
diff --git a/docker/conf/hadoop/core-site.xml b/docker/docker-compose/write/conf/hadoop-read/mapred-site.xml
similarity index 69%
copy from docker/conf/hadoop/core-site.xml
copy to docker/docker-compose/write/conf/hadoop-read/mapred-site.xml
index 6fe6404..d5cc450 100644
--- a/docker/conf/hadoop/core-site.xml
+++ b/docker/docker-compose/write/conf/hadoop-read/mapred-site.xml
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="UTF-8"?>
+<?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 <!--
   Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,13 +17,6 @@
 <!-- Put site-specific property overrides in this file. -->
 
 <configuration>
-    <property>
-        <name>hadoop.tmp.dir</name>
-        <value>/data/hadoop</value>
-        <description>Abase for other temporary directories.</description>
-    </property>
-    <property>
-        <name>fs.defaultFS</name>
-        <value>hdfs://localhost:9000</value>
-    </property>
+
+<property><name>yarn.nodemanager.bind-host</name><value>0.0.0.0</value></property>
 </configuration>
diff --git a/docker/docker-compose/write/conf/hadoop-read/yarn-site.xml b/docker/docker-compose/write/conf/hadoop-read/yarn-site.xml
new file mode 100644
index 0000000..392cf4c
--- /dev/null
+++ b/docker/docker-compose/write/conf/hadoop-read/yarn-site.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<configuration>
+
+<!-- Site specific YARN configuration properties -->
+
+<property><name>yarn.resourcemanager.fs.state-store.uri</name><value>/rmstate</value></property>
+<property><name>yarn.timeline-service.generic-application-history.enabled</name><value>true</value></property>
+<property><name>mapreduce.map.output.compress</name><value>true</value></property>
+<property><name>yarn.resourcemanager.recovery.enabled</name><value>true</value></property>
+<property><name>mapred.map.output.compress.codec</name><value>org.apache.hadoop.io.compress.SnappyCodec</value></property>
+<property><name>yarn.timeline-service.enabled</name><value>true</value></property>
+<property><name>yarn.log-aggregation-enable</name><value>true</value></property>
+<property><name>yarn.resourcemanager.store.class</name><value>org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore</value></property>
+<property><name>yarn.resourcemanager.system-metrics-publisher.enabled</name><value>true</value></property>
+<property><name>yarn.nodemanager.remote-app-log-dir</name><value>/app-logs</value></property>
+<property><name>yarn.nodemanager.aux-services</name><value>mapreduce_shuffle</value></property>
+<property><name>yarn.resourcemanager.resource_tracker.address</name><value>read-resourcemanager:8031</value></property>
+<property><name>yarn.resourcemanager.hostname</name><value>read-resourcemanager</value></property>
+<property><name>yarn.scheduler.capacity.root.default.maximum-allocation-vcores</name><value>4</value></property>
+<property><name>yarn.timeline-service.hostname</name><value>read-historyserver</value></property>
+<property><name>yarn.scheduler.capacity.root.default.maximum-allocation-mb</name><value>8192</value></property>
+<property><name>yarn.log.server.url</name><value>http://read-historyserver:8188/applicationhistory/logs/</value></property>
+<property><name>yarn.resourcemanager.scheduler.class</name><value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value></property>
+<property><name>yarn.resourcemanager.scheduler.address</name><value>read-resourcemanager:8030</value></property>
+<property><name>yarn.resourcemanager.address</name><value>read-resourcemanager:8032</value></property>
+<property><name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name><value>98.5</value></property>
+<property><name>yarn.nodemanager.resource.memory-mb</name><value>16384</value></property>
+<property><name>yarn.nodemanager.resource.cpu-vcores</name><value>8</value></property>
+<property><name>yarn.resourcemanager.bind-host</name><value>0.0.0.0</value></property>
+<property><name>yarn.nodemanager.bind-host</name><value>0.0.0.0</value></property>
+<property><name>yarn.nodemanager.bind-host</name><value>0.0.0.0</value></property>
+<property><name>yarn.timeline-service.bind-host</name><value>0.0.0.0</value></property>
+</configuration>
diff --git a/docker/conf/hadoop/core-site.xml b/docker/docker-compose/write/conf/hadoop-write/core-site.xml
similarity index 63%
copy from docker/conf/hadoop/core-site.xml
copy to docker/docker-compose/write/conf/hadoop-write/core-site.xml
index 6fe6404..69fc462 100644
--- a/docker/conf/hadoop/core-site.xml
+++ b/docker/docker-compose/write/conf/hadoop-write/core-site.xml
@@ -17,13 +17,9 @@
 <!-- Put site-specific property overrides in this file. -->
 
 <configuration>
-    <property>
-        <name>hadoop.tmp.dir</name>
-        <value>/data/hadoop</value>
-        <description>Abase for other temporary directories.</description>
-    </property>
-    <property>
-        <name>fs.defaultFS</name>
-        <value>hdfs://localhost:9000</value>
-    </property>
+<property><name>hadoop.proxyuser.hue.hosts</name><value>*</value></property>
+<property><name>fs.defaultFS</name><value>hdfs://write-namenode:8020</value></property>
+<property><name>io.compression.codecs</name><value>org.apache.hadoop.io.compress.SnappyCodec</value></property>
+<property><name>hadoop.proxyuser.hue.groups</name><value>*</value></property>
+<property><name>hadoop.http.staticuser.user</name><value>root</value></property>
 </configuration>
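
With fs.defaultFS pointing at write-namenode:8020, any container that mounts one of these conf directories talks to the write cluster's HDFS by default. A quick smoke test (a sketch; it assumes the kylin-all client container defined later in this change is running and mounts the matching conf/hadoop copy of this file):

    docker exec kylin-all hdfs dfs -ls hdfs://write-namenode:8020/
    # the short form resolves through fs.defaultFS in the mounted core-site.xml
    docker exec kylin-all hdfs dfs -ls /
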
diff --git a/docker/docker-compose/write/conf/hadoop-write/hdfs-site.xml b/docker/docker-compose/write/conf/hadoop-write/hdfs-site.xml
new file mode 100644
index 0000000..cdf7778
--- /dev/null
+++ b/docker/docker-compose/write/conf/hadoop-write/hdfs-site.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<property><name>dfs.namenode.name.dir</name><value>file:///hadoop/dfs/name</value></property>
+<property><name>dfs.namenode.datanode.registration.ip-hostname-check</name><value>false</value></property>
+<property><name>dfs.permissions.enabled</name><value>false</value></property>
+<property><name>dfs.webhdfs.enabled</name><value>true</value></property>
+<property><name>dfs.namenode.rpc-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.namenode.servicerpc-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.namenode.http-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.namenode.https-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.client.use.datanode.hostname</name><value>true</value></property>
+<property><name>dfs.datanode.use.datanode.hostname</name><value>true</value></property>
+</configuration>
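
The dfs.client.use.datanode.hostname and dfs.datanode.use.datanode.hostname pair makes clients address DataNodes by their compose hostnames instead of container IPs, which is what keeps WebHDFS usable across Docker's bridge network. A sketch of a check against the NameNode web port published by docker-compose-write.yml:

    # list the HDFS root over WebHDFS (50070 is published to the host)
    curl -s 'http://localhost:50070/webhdfs/v1/?op=LISTSTATUS'
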
diff --git a/docker/conf/hadoop/core-site.xml b/docker/docker-compose/write/conf/hadoop-write/mapred-site.xml
similarity index 69%
copy from docker/conf/hadoop/core-site.xml
copy to docker/docker-compose/write/conf/hadoop-write/mapred-site.xml
index 6fe6404..d5cc450 100644
--- a/docker/conf/hadoop/core-site.xml
+++ b/docker/docker-compose/write/conf/hadoop-write/mapred-site.xml
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="UTF-8"?>
+<?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 <!--
   Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,13 +17,6 @@
 <!-- Put site-specific property overrides in this file. -->
 
 <configuration>
-    <property>
-        <name>hadoop.tmp.dir</name>
-        <value>/data/hadoop</value>
-        <description>Abase for other temporary directories.</description>
-    </property>
-    <property>
-        <name>fs.defaultFS</name>
-        <value>hdfs://localhost:9000</value>
-    </property>
+
+<property><name>yarn.nodemanager.bind-host</name><value>0.0.0.0</value></property>
 </configuration>
diff --git a/docker/docker-compose/write/conf/hadoop-write/yarn-site.xml b/docker/docker-compose/write/conf/hadoop-write/yarn-site.xml
new file mode 100644
index 0000000..b55dd34
--- /dev/null
+++ b/docker/docker-compose/write/conf/hadoop-write/yarn-site.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<configuration>
+
+<!-- Site specific YARN configuration properties -->
+
+<property><name>yarn.resourcemanager.fs.state-store.uri</name><value>/rmstate</value></property>
+<property><name>yarn.timeline-service.generic-application-history.enabled</name><value>true</value></property>
+<property><name>mapreduce.map.output.compress</name><value>true</value></property>
+<property><name>yarn.resourcemanager.recovery.enabled</name><value>true</value></property>
+<property><name>mapred.map.output.compress.codec</name><value>org.apache.hadoop.io.compress.SnappyCodec</value></property>
+<property><name>yarn.timeline-service.enabled</name><value>true</value></property>
+<property><name>yarn.log-aggregation-enable</name><value>true</value></property>
+<property><name>yarn.resourcemanager.store.class</name><value>org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore</value></property>
+<property><name>yarn.resourcemanager.system-metrics-publisher.enabled</name><value>true</value></property>
+<property><name>yarn.nodemanager.remote-app-log-dir</name><value>/app-logs</value></property>
+<property><name>yarn.nodemanager.aux-services</name><value>mapreduce_shuffle</value></property>
+<property><name>yarn.resourcemanager.resource-tracker.address</name><value>write-resourcemanager:8031</value></property>
+<property><name>yarn.resourcemanager.hostname</name><value>write-resourcemanager</value></property>
+<property><name>yarn.scheduler.capacity.root.default.maximum-allocation-vcores</name><value>4</value></property>
+<property><name>yarn.timeline-service.hostname</name><value>write-historyserver</value></property>
+<property><name>yarn.scheduler.capacity.root.default.maximum-allocation-mb</name><value>8192</value></property>
+<property><name>yarn.log.server.url</name><value>http://write-historyserver:8188/applicationhistory/logs/</value></property>
+<property><name>yarn.resourcemanager.scheduler.class</name><value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value></property>
+<property><name>yarn.resourcemanager.scheduler.address</name><value>write-resourcemanager:8030</value></property>
+<property><name>yarn.resourcemanager.address</name><value>write-resourcemanager:8032</value></property>
+<property><name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name><value>98.5</value></property>
+<property><name>yarn.nodemanager.resource.memory-mb</name><value>16384</value></property>
+<property><name>yarn.nodemanager.resource.cpu-vcores</name><value>8</value></property>
+<property><name>yarn.resourcemanager.bind-host</name><value>0.0.0.0</value></property>
+<property><name>yarn.nodemanager.bind-host</name><value>0.0.0.0</value></property>
+<property><name>yarn.timeline-service.bind-host</name><value>0.0.0.0</value></property>
+</configuration>
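
This yarn-site.xml gives clients the write cluster's ResourceManager and timeline-server addresses plus the per-NodeManager limits (16 GB, 8 vcores). A sketch of sanity-checking both services once the cluster is up (8088 and 8188 are published to the host by docker-compose-write.yml):

    curl -s http://localhost:8088/ws/v1/cluster/info
    curl -s http://localhost:8188/ws/v1/timeline
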
diff --git a/docker/conf/hadoop/core-site.xml b/docker/docker-compose/write/conf/hadoop/core-site.xml
similarity index 63%
copy from docker/conf/hadoop/core-site.xml
copy to docker/docker-compose/write/conf/hadoop/core-site.xml
index 6fe6404..dd5a81b 100644
--- a/docker/conf/hadoop/core-site.xml
+++ b/docker/docker-compose/write/conf/hadoop/core-site.xml
@@ -17,13 +17,10 @@
 <!-- Put site-specific property overrides in this file. -->
 
 <configuration>
-    <property>
-        <name>hadoop.tmp.dir</name>
-        <value>/data/hadoop</value>
-        <description>Abase for other temporary directories.</description>
-    </property>
-    <property>
-        <name>fs.defaultFS</name>
-        <value>hdfs://localhost:9000</value>
-    </property>
+<property><name>hadoop.proxyuser.hue.hosts</name><value>*</value></property>
+<property><name>fs.defaultFS</name><value>hdfs://write-namenode:8020</value></property>
+<property><name>io.compression.codecs</name><value>org.apache.hadoop.io.compress.SnappyCodec</value></property>
+<property><name>hadoop.proxyuser.hue.groups</name><value>*</value></property>
+<property><name>hadoop.http.staticuser.user</name><value>root</value></property>
+
 </configuration>
diff --git a/docker/docker-compose/write/conf/hadoop/hdfs-site.xml b/docker/docker-compose/write/conf/hadoop/hdfs-site.xml
new file mode 100644
index 0000000..cdf7778
--- /dev/null
+++ b/docker/docker-compose/write/conf/hadoop/hdfs-site.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<property><name>dfs.namenode.name.dir</name><value>file:///hadoop/dfs/name</value></property>
+<property><name>dfs.namenode.datanode.registration.ip-hostname-check</name><value>false</value></property>
+<property><name>dfs.permissions.enabled</name><value>false</value></property>
+<property><name>dfs.webhdfs.enabled</name><value>true</value></property>
+<property><name>dfs.namenode.rpc-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.namenode.servicerpc-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.namenode.http-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.namenode.https-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.client.use.datanode.hostname</name><value>true</value></property>
+<property><name>dfs.datanode.use.datanode.hostname</name><value>true</value></property>
+</configuration>
diff --git a/docker/conf/hadoop/core-site.xml b/docker/docker-compose/write/conf/hadoop/mapred-site.xml
similarity index 69%
copy from docker/conf/hadoop/core-site.xml
copy to docker/docker-compose/write/conf/hadoop/mapred-site.xml
index 6fe6404..d5cc450 100644
--- a/docker/conf/hadoop/core-site.xml
+++ b/docker/docker-compose/write/conf/hadoop/mapred-site.xml
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="UTF-8"?>
+<?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 <!--
   Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,13 +17,6 @@
 <!-- Put site-specific property overrides in this file. -->
 
 <configuration>
-    <property>
-        <name>hadoop.tmp.dir</name>
-        <value>/data/hadoop</value>
-        <description>Abase for other temporary directories.</description>
-    </property>
-    <property>
-        <name>fs.defaultFS</name>
-        <value>hdfs://localhost:9000</value>
-    </property>
+
+<property><name>yarn.nodemanager.bind-host</name><value>0.0.0.0</value></property>
 </configuration>
diff --git a/docker/docker-compose/write/conf/hadoop/yarn-site.xml b/docker/docker-compose/write/conf/hadoop/yarn-site.xml
new file mode 100644
index 0000000..b55dd34
--- /dev/null
+++ b/docker/docker-compose/write/conf/hadoop/yarn-site.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<configuration>
+
+<!-- Site specific YARN configuration properties -->
+
+<property><name>yarn.resourcemanager.fs.state-store.uri</name><value>/rmstate</value></property>
+<property><name>yarn.timeline-service.generic-application-history.enabled</name><value>true</value></property>
+<property><name>mapreduce.map.output.compress</name><value>true</value></property>
+<property><name>yarn.resourcemanager.recovery.enabled</name><value>true</value></property>
+<property><name>mapred.map.output.compress.codec</name><value>org.apache.hadoop.io.compress.SnappyCodec</value></property>
+<property><name>yarn.timeline-service.enabled</name><value>true</value></property>
+<property><name>yarn.log-aggregation-enable</name><value>true</value></property>
+<property><name>yarn.resourcemanager.store.class</name><value>org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore</value></property>
+<property><name>yarn.resourcemanager.system-metrics-publisher.enabled</name><value>true</value></property>
+<property><name>yarn.nodemanager.remote-app-log-dir</name><value>/app-logs</value></property>
+<property><name>yarn.nodemanager.aux-services</name><value>mapreduce_shuffle</value></property>
+<property><name>yarn.resourcemanager.resource-tracker.address</name><value>write-resourcemanager:8031</value></property>
+<property><name>yarn.resourcemanager.hostname</name><value>write-resourcemanager</value></property>
+<property><name>yarn.scheduler.capacity.root.default.maximum-allocation-vcores</name><value>4</value></property>
+<property><name>yarn.timeline-service.hostname</name><value>write-historyserver</value></property>
+<property><name>yarn.scheduler.capacity.root.default.maximum-allocation-mb</name><value>8192</value></property>
+<property><name>yarn.log.server.url</name><value>http://write-historyserver:8188/applicationhistory/logs/</value></property>
+<property><name>yarn.resourcemanager.scheduler.class</name><value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value></property>
+<property><name>yarn.resourcemanager.scheduler.address</name><value>write-resourcemanager:8030</value></property>
+<property><name>yarn.resourcemanager.address</name><value>write-resourcemanager:8032</value></property>
+<property><name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name><value>98.5</value></property>
+<property><name>yarn.nodemanager.resource.memory-mb</name><value>16384</value></property>
+<property><name>yarn.nodemanager.resource.cpu-vcores</name><value>8</value></property>
+<property><name>yarn.resourcemanager.bind-host</name><value>0.0.0.0</value></property>
+<property><name>yarn.nodemanager.bind-host</name><value>0.0.0.0</value></property>
+<property><name>yarn.timeline-service.bind-host</name><value>0.0.0.0</value></property>
+</configuration>
diff --git a/docker/docker-compose/write/conf/hbase/hbase-site.xml b/docker/docker-compose/write/conf/hbase/hbase-site.xml
new file mode 100644
index 0000000..988d91c
--- /dev/null
+++ b/docker/docker-compose/write/conf/hbase/hbase-site.xml
@@ -0,0 +1,33 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+<property><name>hbase.zookeeper.quorum</name><value>read-zookeeper</value></property>
+<property><name>hbase.master</name><value>read-hbase-master:16000</value></property>
+<property><name>hbase.regionserver.port</name><value>16020</value></property>
+<property><name>hbase.regionserver.info.port</name><value>16030</value></property>
+<property><name>hbase.cluster.distributed</name><value>true</value></property>
+<property><name>hbase.rootdir</name><value>hdfs://read-namenode:8020/hbase</value></property>
+<property><name>hbase.master.info.port</name><value>16010</value></property>
+<property><name>hbase.master.hostname</name><value>read-hbase-master</value></property>
+<property><name>hbase.master.port</name><value>16000</value></property>
+</configuration>
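
Note that although this hbase-site.xml ships under write/conf, it points clients at the read cluster (read-zookeeper, read-namenode, read-hbase-master), which is consistent with a read/write-separated deployment where HBase is served from the read side. A sketch of a connectivity check from any container with the HBase client on its PATH:

    echo "status" | hbase shell
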
diff --git a/docker/docker-compose/write/conf/hive/hive-site.xml b/docker/docker-compose/write/conf/hive/hive-site.xml
new file mode 100644
index 0000000..c60fe36
--- /dev/null
+++ b/docker/docker-compose/write/conf/hive/hive-site.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?><!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+--><configuration>
+    <property><name>hive.metastore.uris</name><value>thrift://write-hive-metastore:9083</value></property>
+    <property><name>datanucleus.autoCreateSchema</name><value>false</value></property>
+    <property><name>javax.jdo.option.ConnectionURL</name><value>jdbc:postgresql://write-hive-metastore-postgresql/metastore</value></property>
+    <property><name>javax.jdo.option.ConnectionDriverName</name><value>org.postgresql.Driver</value></property>
+    <property><name>javax.jdo.option.ConnectionPassword</name><value>hive</value></property>
+    <property><name>javax.jdo.option.ConnectionUserName</name><value>hive</value></property>
+</configuration>
+
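
Only hive.metastore.uris matters to a client that goes through the thrift metastore; the PostgreSQL JDBC settings here appear to be leftovers from the bde2020 template, and the compose file points the metastore service itself at MySQL instead. A sketch of checking HiveServer2 from the client container (10000 is published by docker-compose-write.yml):

    docker exec kylin-all beeline -u jdbc:hive2://write-hive-server:10000 -e 'show databases;'
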
diff --git a/docker/docker-compose/write/docker-compose-kafka.yml b/docker/docker-compose/write/docker-compose-kafka.yml
new file mode 100644
index 0000000..9590c62
--- /dev/null
+++ b/docker/docker-compose/write/docker-compose-kafka.yml
@@ -0,0 +1,18 @@
+version: "3.3"
+
+services:
+  write-kafka:
+    image: ${KAFKA_IMAGETAG:-bitnami/kafka:2.0.0}
+    container_name: write-kafkabroker
+    hostname: write-kafkabroker
+    environment:
+      - KAFKA_ZOOKEEPER_CONNECT=write-zookeeper:2181
+      - ALLOW_PLAINTEXT_LISTENER=yes
+    networks:
+      - write_kylin
+    ports:
+      - 9092:9092
+
+networks:
+  write_kylin:
+    external: true
\ No newline at end of file
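
Both this file and docker-compose-zookeeper.yml below join the external write_kylin network, so that network must exist before either stack is started. A sketch:

    docker network create write_kylin
    docker-compose -f docker-compose-zookeeper.yml up -d
    docker-compose -f docker-compose-kafka.yml up -d
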
diff --git a/docker/docker-compose/write/docker-compose-write.yml b/docker/docker-compose/write/docker-compose-write.yml
new file mode 100644
index 0000000..aefe726
--- /dev/null
+++ b/docker/docker-compose/write/docker-compose-write.yml
@@ -0,0 +1,215 @@
+version: "3.3"
+
+services:
+  write-namenode:
+    image: ${HADOOP_NAMENODE_IMAGETAG:-bde2020/hadoop-namenode:2.0.0-hadoop2.7.4-java8}
+    container_name: write-namenode
+    hostname: write-namenode
+    volumes:
+      - ./data/write_hadoop_namenode:/hadoop/dfs/name
+    environment:
+      - CLUSTER_NAME=test-write
+    env_file:
+      - write-hadoop.env
+    expose:
+      - 8020
+    ports:
+      - 50070:50070
+
+  write-datanode1:
+    image: ${HADOOP_DATANODE_IMAGETAG:-bde2020/hadoop-datanode:2.0.0-hadoop2.7.4-java8}
+    container_name: write-datanode1
+    hostname: write-datanode1
+    volumes:
+      - ./data/write_hadoop_datanode1:/hadoop/dfs/data
+    environment:
+      SERVICE_PRECONDITION: "write-namenode:50070"
+    env_file:
+      - write-hadoop.env
+    links:
+      - write-namenode
+
+  write-datanode2:
+    image: ${HADOOP_DATANODE_IMAGETAG:-bde2020/hadoop-datanode:2.0.0-hadoop2.7.4-java8}
+    container_name: write-datanode2
+    hostname: write-datanode2
+    volumes:
+      - ./data/write_hadoop_datanode2:/hadoop/dfs/data
+    environment:
+      SERVICE_PRECONDITION: "write-namenode:50070"
+    env_file:
+      - write-hadoop.env
+
+  write-datanode3:
+    image: ${HADOOP_DATANODE_IMAGETAG:-bde2020/hadoop-datanode:2.0.0-hadoop2.7.4-java8}
+    container_name: write-datanode3
+    hostname: write-datanode3
+    volumes:
+      - ./data/write_hadoop_datanode3:/hadoop/dfs/data
+    environment:
+      SERVICE_PRECONDITION: "write-namenode:50070"
+    env_file:
+      - write-hadoop.env
+
+  write-resourcemanager:
+    image: ${HADOOP_RESOURCEMANAGER_IMAGETAG:-bde2020/hadoop-resourcemanager:2.0.0-hadoop2.7.4-java8}
+    container_name: write-resourcemanager
+    hostname: write-resourcemanager
+    environment:
+      SERVICE_PRECONDITION: "write-namenode:50070 write-datanode1:50075 write-datanode2:50075 write-datanode3:50075"
+    env_file:
+      - write-hadoop.env
+    ports:
+      - 8088:8088
+
+  write-nodemanager1:
+    image: ${HADOOP_NODEMANAGER_IMAGETAG:-bde2020/hadoop-nodemanager:2.0.0-hadoop2.7.4-java8}
+    container_name: write-nodemanager1
+    hostname: write-nodemanager1
+    environment:
+      SERVICE_PRECONDITION: "write-namenode:50070 write-datanode1:50075 write-datanode2:50075 write-datanode3:50075 write-resourcemanager:8088"
+    env_file:
+      - write-hadoop.env
+
+  write-nodemanager2:
+    image: ${HADOOP_NODEMANAGER_IMAGETAG:-bde2020/hadoop-nodemanager:2.0.0-hadoop2.7.4-java8}
+    container_name: write-nodemanager2
+    hostname: write-nodemanager2
+    environment:
+      SERVICE_PRECONDITION: "write-namenode:50070 write-datanode1:50075 write-datanode2:50075 write-datanode3:50075 write-resourcemanager:8088"
+    env_file:
+      - write-hadoop.env
+
+  write-historyserver:
+    image: ${HADOOP_HISTORYSERVER_IMAGETAG:-bde2020/hadoop-historyserver:2.0.0-hadoop2.7.4-java8}
+    container_name: write-historyserver
+    hostname: write-historyserver
+    volumes:
+      - ./data/write_hadoop_historyserver:/hadoop/yarn/timeline
+    environment:
+      SERVICE_PRECONDITION: "write-namenode:50070 write-datanode1:50075 write-datanode2:50075 write-datanode3:50075 write-resourcemanager:8088"
+    env_file:
+      - write-hadoop.env
+    ports:
+      - 8188:8188
+
+  write-hive-server:
+    image: ${HIVE_IMAGETAG:-apachekylin/kylin-hive:hive_1.2.2_hadoop_2.8.5}
+    container_name: write-hive-server
+    hostname: write-hive-server
+    env_file:
+      - write-hadoop.env
+    environment:
+#      HIVE_CORE_CONF_javax_jdo_option_ConnectionURL: "jdbc:postgresql://write-hive-metastore/metastore"
+      HIVE_CORE_CONF_javax_jdo_option_ConnectionURL: "jdbc:mysql://metastore-db/metastore"
+      SERVICE_PRECONDITION: "write-hive-metastore:9083"
+    ports:
+      - 10000:10000
+
+  write-hive-metastore:
+#    image: ${HIVE_IMAGETAG:-bde2020/hive:2.3.2-postgresql-metastore}
+    image: ${HIVE_IMAGETAG:-apachekylin/kylin-hive:hive_1.2.2_hadoop_2.8.5}
+    container_name: write-hive-metastore
+    hostname: write-hive-metastore
+    env_file:
+      - write-hadoop.env
+    command: /opt/hive/bin/hive --service metastore
+    expose:
+      - 9083
+    environment:
+      SERVICE_PRECONDITION: "write-namenode:50070 write-datanode1:50075 write-datanode2:50075 write-datanode3:50075 metastore-db:3306"
+#       SERVICE_PRECONDITION: "write-namenode:50070 write-datanode1:50075 write-datanode2:50075 write-datanode3:50075 write-hive-metastore-postgresql:5432"
+
+#  write-hive-metastore-postgresql:
+#    image: bde2020/hive-metastore-postgresql:2.3.0
+#    container_name: write-hive-metastore-postgresql
+#    hostname: write-hive-metastore-postgresql
+
+  metastore-db:
+    image: mysql:5.6.49
+    container_name: metastore-db
+    hostname: metastore-db
+    volumes:
+      - ./data/mysql:/var/lib/mysql
+    environment:
+      - MYSQL_ROOT_PASSWORD=kylin
+      - MYSQL_DATABASE=metastore
+      - MYSQL_USER=kylin
+      - MYSQL_PASSWORD=kylin
+    ports:
+      - 3306:3306
+
+  write-zookeeper:
+    image: ${ZOOKEEPER_IMAGETAG:-zookeeper:3.4.10}
+    container_name: write-zookeeper
+    hostname: write-zookeeper
+    environment:
+      ZOO_MY_ID: 1
+      ZOO_SERVERS: server.1=0.0.0.0:2888:3888
+    ports:
+      - 2181:2181
+
+  write-kafka:
+    image: ${KAFKA_IMAGETAG:-bitnami/kafka:2.0.0}
+    container_name: write-kafkabroker
+    hostname: write-kafkabroker
+    environment:
+      - KAFKA_ZOOKEEPER_CONNECT=write-zookeeper:2181
+      - ALLOW_PLAINTEXT_LISTENER=yes
+    ports:
+      - 9092:9092
+
+  kerberos-kdc:
+    image: ${KERBEROS_IMAGE}
+    container_name: kerberos-kdc
+    hostname: kerberos-kdc
+
+  write-hbase-master:
+    image: ${HBASE_MASTER_IMAGETAG:-bde2020/hbase-master:1.0.0-hbase1.2.6}
+    container_name: write-hbase-master
+    hostname: write-hbase-master
+    env_file:
+      - write-hbase-distributed-local.env
+    environment:
+      SERVICE_PRECONDITION: "write-namenode:50070 write-datanode1:50075 write-datanode2:50075 write-datanode3:50075 write-zookeeper:2181"
+    ports:
+      - 16010:16010
+
+  write-hbase-regionserver1:
+    image: ${HBASE_REGIONSERVER_IMAGETAG:-bde2020/hbase-regionserver:1.0.0-hbase1.2.6}
+    container_name: write-hbase-regionserver1
+    hostname: write-hbase-regionserver1
+    env_file:
+      - write-hbase-distributed-local.env
+    environment:
+      HBASE_CONF_hbase_regionserver_hostname: write-hbase-regionserver1
+      SERVICE_PRECONDITION: "write-namenode:50070 write-datanode1:50075 write-datanode2:50075 write-datanode3:50075 write-zookeeper:2181 write-hbase-master:16010"
+
+  write-hbase-regionserver2:
+    image: ${HBASE_REGIONSERVER_IMAGETAG:-bde2020/hbase-regionserver:1.0.0-hbase1.2.6}
+    container_name: write-hbase-regionserver2
+    hostname: write-hbase-regionserver2
+    env_file:
+      - write-hbase-distributed-local.env
+    environment:
+      HBASE_CONF_hbase_regionserver_hostname: write-hbase-regionserver2
+      SERVICE_PRECONDITION: "write-namenode:50070 write-datanode1:50075 write-datanode2:50075 write-datanode3:50075 write-zookeeper:2181 write-hbase-master:16010"
+
+  kylin-all:
+    image: ${CLIENT_IMAGETAG}
+    container_name: kylin-all
+    hostname: kylin-all
+    volumes:
+      - ./conf/hadoop:/etc/hadoop/conf
+      - ./conf/hbase:/etc/hbase/conf
+      - ./conf/hive:/etc/hive/conf
+      - ./kylin:/opt/kylin/
+    env_file:
+      - client.env
+    environment:
+      HADOOP_CONF_DIR: /etc/hadoop/conf
+      HIVE_CONF_DIR: /etc/hive/conf
+      HBASE_CONF_DIR: /etc/hbase/conf
+      KYLIN_HOME: /opt/kylin/kylin
+    ports:
+      - 7070:7070
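
A sketch of bringing up the whole write cluster from docker/docker-compose/write, assuming the client image named by CLIENT_IMAGETAG has been built first (for example with build_cluster_images.sh):

    docker-compose -f docker-compose-write.yml up -d
    docker-compose -f docker-compose-write.yml ps
    # Kylin web UI once kylin-all has finished starting
    curl -s http://localhost:7070/kylin/ | head
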
diff --git a/docker/docker-compose/write/docker-compose-zookeeper.yml b/docker/docker-compose/write/docker-compose-zookeeper.yml
new file mode 100644
index 0000000..cece11b
--- /dev/null
+++ b/docker/docker-compose/write/docker-compose-zookeeper.yml
@@ -0,0 +1,18 @@
+version: "3.3"
+
+services:
+  write-zookeeper:
+    image: ${ZOOKEEPER_IMAGETAG:-zookeeper:3.4.10}
+    container_name: write-zookeeper
+    hostname: write-zookeeper
+    environment:
+      ZOO_MY_ID: 1
+      ZOO_SERVERS: server.1=0.0.0.0:2888:3888
+    networks:
+      - write_kylin
+    ports:
+      - 2181:2181
+
+networks:
+  write_kylin:
+    external: true
\ No newline at end of file
diff --git a/docker/docker-compose/write/write-hadoop.env b/docker/docker-compose/write/write-hadoop.env
new file mode 100644
index 0000000..8ec98c9
--- /dev/null
+++ b/docker/docker-compose/write/write-hadoop.env
@@ -0,0 +1,47 @@
+CORE_CONF_fs_defaultFS=hdfs://write-namenode:8020
+CORE_CONF_hadoop_http_staticuser_user=root
+CORE_CONF_hadoop_proxyuser_hue_hosts=*
+CORE_CONF_hadoop_proxyuser_hue_groups=*
+CORE_CONF_io_compression_codecs=org.apache.hadoop.io.compress.SnappyCodec
+
+HDFS_CONF_dfs_webhdfs_enabled=true
+HDFS_CONF_dfs_permissions_enabled=false
+HDFS_CONF_dfs_namenode_datanode_registration_ip___hostname___check=false
+
+YARN_CONF_yarn_log___aggregation___enable=true
+YARN_CONF_yarn_log_server_url=http://write-historyserver:8188/applicationhistory/logs/
+YARN_CONF_yarn_resourcemanager_recovery_enabled=true
+YARN_CONF_yarn_resourcemanager_store_class=org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore
+YARN_CONF_yarn_resourcemanager_scheduler_class=org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler
+YARN_CONF_yarn_scheduler_capacity_root_default_maximum___allocation___mb=8192
+YARN_CONF_yarn_scheduler_capacity_root_default_maximum___allocation___vcores=4
+YARN_CONF_yarn_resourcemanager_fs_state___store_uri=/rmstate
+YARN_CONF_yarn_resourcemanager_system___metrics___publisher_enabled=true
+YARN_CONF_yarn_resourcemanager_hostname=write-resourcemanager
+YARN_CONF_yarn_resourcemanager_address=write-resourcemanager:8032
+YARN_CONF_yarn_resourcemanager_scheduler_address=write-resourcemanager:8030
+YARN_CONF_yarn_resourcemanager_resource___tracker_address=write-resourcemanager:8031
+YARN_CONF_yarn_timeline___service_enabled=true
+YARN_CONF_yarn_timeline___service_generic___application___history_enabled=true
+YARN_CONF_yarn_timeline___service_hostname=write-historyserver
+YARN_CONF_mapreduce_map_output_compress=true
+YARN_CONF_mapred_map_output_compress_codec=org.apache.hadoop.io.compress.SnappyCodec
+YARN_CONF_yarn_nodemanager_resource_memory___mb=16384
+YARN_CONF_yarn_nodemanager_resource_cpu___vcores=8
+YARN_CONF_yarn_nodemanager_disk___health___checker_max___disk___utilization___per___disk___percentage=98.5
+YARN_CONF_yarn_nodemanager_remote___app___log___dir=/app-logs
+YARN_CONF_yarn_nodemanager_aux___services=mapreduce_shuffle
+
+MAPRED_CONF_mapreduce_framework_name=yarn
+MAPRED_CONF_mapred_child_java_opts=-Xmx4096m
+MAPRED_CONF_mapreduce_map_memory_mb=4096
+MAPRED_CONF_mapreduce_reduce_memory_mb=8192
+MAPRED_CONF_mapreduce_map_java_opts=-Xmx3072m
+MAPRED_CONF_mapreduce_reduce_java_opts=-Xmx6144m
+
+HIVE_SITE_CONF_javax_jdo_option_ConnectionURL=jdbc:mysql://metastore-db/metastore
+HIVE_SITE_CONF_javax_jdo_option_ConnectionDriverName=com.mysql.jdbc.Driver
+HIVE_SITE_CONF_javax_jdo_option_ConnectionUserName=kylin
+HIVE_SITE_CONF_javax_jdo_option_ConnectionPassword=kylin
+HIVE_SITE_CONF_datanucleus_autoCreateSchema=true
+HIVE_SITE_CONF_hive_metastore_uris=thrift://write-hive-metastore:9083
\ No newline at end of file
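
The variable names encode XML property names using the convention implemented by the cluster base entrypoint further down: "___" maps to "-", "__" maps to "_", and a single "_" maps to ".". For example:

    YARN_CONF_yarn_log___aggregation___enable=true
        -> yarn.log-aggregation-enable = true
    YARN_CONF_yarn_resourcemanager_resource___tracker_address=write-resourcemanager:8031
        -> yarn.resourcemanager.resource-tracker.address = write-resourcemanager:8031
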
diff --git a/docker/docker-compose/write/write-hbase-distributed-local.env b/docker/docker-compose/write/write-hbase-distributed-local.env
new file mode 100644
index 0000000..c866cef
--- /dev/null
+++ b/docker/docker-compose/write/write-hbase-distributed-local.env
@@ -0,0 +1,12 @@
+HBASE_CONF_hbase_rootdir=hdfs://write-namenode:8020/hbase
+HBASE_CONF_hbase_cluster_distributed=true
+HBASE_CONF_hbase_zookeeper_quorum=write-zookeeper
+
+HBASE_CONF_hbase_master=write-hbase-master:16000
+HBASE_CONF_hbase_master_hostname=write-hbase-master
+HBASE_CONF_hbase_master_port=16000
+HBASE_CONF_hbase_master_info_port=16010
+HBASE_CONF_hbase_regionserver_port=16020
+HBASE_CONF_hbase_regionserver_info_port=16030
+
+HBASE_MANAGES_ZK=false
\ No newline at end of file
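
HBASE_MANAGES_ZK=false makes HBase use the external write-zookeeper container rather than spawning its own quorum. A sketch of verifying the master came up (16010 is published by docker-compose-write.yml):

    curl -s http://localhost:16010/master-status | head
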
diff --git a/docker/dockerfile/cluster/base/Dockerfile b/docker/dockerfile/cluster/base/Dockerfile
new file mode 100644
index 0000000..ccc05b3
--- /dev/null
+++ b/docker/dockerfile/cluster/base/Dockerfile
@@ -0,0 +1,78 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+FROM centos:7.3.1611
+MAINTAINER kylin
+
+USER root
+
+ARG JAVA_VERSION=jdk1.8.0_141
+ARG HADOOP_VERSION=2.8.5
+ARG INSTALL_FROM=local
+ARG HADOOP_URL=https://archive.apache.org/dist/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}.tar.gz
+
+ENV JAVA_HOME /opt/${JAVA_VERSION}
+ENV HADOOP_VERSION ${HADOOP_VERSION}
+ENV INSTALL_FROM ${INSTALL_FROM}
+ENV HADOOP_URL ${HADOOP_URL}
+
+# install tools
+RUN yum -y install lsof wget tar git unzip curl net-tools procps perl sed nc which
+
+# setup jdk
+RUN wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" "http://download.oracle.com/otn-pub/java/jdk/8u141-b15/336fa29ff2bb4ef291e347e091f7f4a7/jdk-8u141-linux-x64.tar.gz" -P /opt \
+    && tar -zxvf /opt/jdk-8u141-linux-x64.tar.gz -C /opt/ \
+    && rm -f /opt/jdk-8u141-linux-x64.tar.gz
+
+# use buildkit
+#IF $INSTALL_FROM=="net"
+#RUN set -x \
+#    && echo "Fetch URL2 is : ${HADOOP_URL}" \
+#    && curl -fSL "${HADOOP_URL}" -o /tmp/hadoop.tar.gz \
+#    && curl -fSL "${HADOOP_URL}.asc" -o /tmp/hadoop.tar.gz.asc \
+#ELSE IF $INSTALL_FROM=="local"
+#COPY ${PACKAGE_PATH}hadoop-${HADOOP_VERSION}.tar.gz /tmp/hadoop.tar.gz
+#COPY ${PACKAGE_PATH}hadoop-${HADOOP_VERSION}.tar.gz.asc /tmp/hadoop.tar.gz.asc
+#DONE
+
+RUN set -x \
+    && echo "Fetch URL is : ${HADOOP_URL}" \
+    && curl -fSL "${HADOOP_URL}" -o /tmp/hadoop.tar.gz \
+    && curl -fSL "${HADOOP_URL}.asc" -o /tmp/hadoop.tar.gz.asc
+
+RUN set -x \
+    && tar -xvf /tmp/hadoop.tar.gz -C /opt/ \
+    && rm /tmp/hadoop.tar.gz* \
+    && ln -s /opt/hadoop-$HADOOP_VERSION/etc/hadoop /etc/hadoop \
+    && cp /etc/hadoop/mapred-site.xml.template /etc/hadoop/mapred-site.xml \
+    && mkdir -p /opt/hadoop-$HADOOP_VERSION/logs \
+    && mkdir /hadoop-data
+
+ENV HADOOP_PREFIX=/opt/hadoop-$HADOOP_VERSION
+ENV HADOOP_CONF_DIR=/etc/hadoop
+ENV MULTIHOMED_NETWORK=1
+ENV HADOOP_HOME=${HADOOP_PREFIX}
+ENV HADOOP_INSTALL=${HADOOP_HOME}
+
+ENV USER=root
+ENV PATH $JAVA_HOME/bin:/usr/bin:/bin:$HADOOP_PREFIX/bin/:$PATH
+
+ADD entrypoint.sh /opt/entrypoint/hadoop/entrypoint.sh
+RUN chmod a+x /opt/entrypoint/hadoop/entrypoint.sh
+
+ENTRYPOINT ["/opt/entrypoint/hadoop/entrypoint.sh"]
+
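
A sketch of building this base image; the tag is illustrative rather than one the other Dockerfiles hard-code:

    cd docker/dockerfile/cluster/base
    docker build --build-arg HADOOP_VERSION=2.8.5 -t kylin-hadoop-base:2.8.5 .
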
diff --git a/docker/dockerfile/cluster/base/entrypoint.sh b/docker/dockerfile/cluster/base/entrypoint.sh
new file mode 100644
index 0000000..3479844
--- /dev/null
+++ b/docker/dockerfile/cluster/base/entrypoint.sh
@@ -0,0 +1,139 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+#######################################################################################
+##            COPIED FROM                                                            ##
+##  https://github.com/big-data-europe/docker-hadoop/blob/master/base/entrypoint.sh  ##
+#                                                                                    ##
+#######################################################################################
+
+# Set some sensible defaults
+export CORE_CONF_fs_defaultFS=${CORE_CONF_fs_defaultFS:-hdfs://`hostname -f`:8020}
+
+function addProperty() {
+  local path=$1
+  local name=$2
+  local value=$3
+
+  local entry="<property><name>$name</name><value>${value}</value></property>"
+  local escapedEntry=$(echo $entry | sed 's/\//\\\//g')
+  sed -i "/<\/configuration>/ s/.*/${escapedEntry}\n&/" $path
+}
+
+function configure() {
+    local path=$1
+    local module=$2
+    local envPrefix=$3
+
+    local var
+    local value
+
+    echo "Configuring $module"
+    for c in `printenv | perl -sne 'print "$1 " if m/^${envPrefix}_(.+?)=.*/' -- -envPrefix=$envPrefix`; do
+        name=`echo ${c} | perl -pe 's/___/-/g; s/__/@/g; s/_/./g; s/@/_/g;'`
+        var="${envPrefix}_${c}"
+        value=${!var}
+        echo " - Setting $name=$value"
+        addProperty /etc/hadoop/$module-site.xml $name "$value"
+    done
+}
+
+configure /etc/hadoop/core-site.xml core CORE_CONF
+configure /etc/hadoop/hdfs-site.xml hdfs HDFS_CONF
+configure /etc/hadoop/yarn-site.xml yarn YARN_CONF
+configure /etc/hadoop/httpfs-site.xml httpfs HTTPFS_CONF
+configure /etc/hadoop/kms-site.xml kms KMS_CONF
+
+if [ "$MULTIHOMED_NETWORK" = "1" ]; then
+    echo "Configuring for multihomed network"
+
+    # HDFS
+    addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.rpc-bind-host 0.0.0.0
+    addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.servicerpc-bind-host 0.0.0.0
+    addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.http-bind-host 0.0.0.0
+    addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.https-bind-host 0.0.0.0
+    addProperty /etc/hadoop/hdfs-site.xml dfs.client.use.datanode.hostname true
+    addProperty /etc/hadoop/hdfs-site.xml dfs.datanode.use.datanode.hostname true
+
+    # YARN
+    addProperty /etc/hadoop/yarn-site.xml yarn.resourcemanager.bind-host 0.0.0.0
+    addProperty /etc/hadoop/yarn-site.xml yarn.nodemanager.bind-host 0.0.0.0
+    addProperty /etc/hadoop/yarn-site.xml yarn.timeline-service.bind-host 0.0.0.0
+
+    # MAPRED
+    addProperty /etc/hadoop/mapred-site.xml yarn.nodemanager.bind-host 0.0.0.0
+fi
+
+if [ -n "$GANGLIA_HOST" ]; then
+    mv /etc/hadoop/hadoop-metrics.properties /etc/hadoop/hadoop-metrics.properties.orig
+    mv /etc/hadoop/hadoop-metrics2.properties /etc/hadoop/hadoop-metrics2.properties.orig
+
+    for module in mapred jvm rpc ugi; do
+        echo "$module.class=org.apache.hadoop.metrics.ganglia.GangliaContext31"
+        echo "$module.period=10"
+        echo "$module.servers=$GANGLIA_HOST:8649"
+    done > /etc/hadoop/hadoop-metrics.properties
+
+    for module in namenode datanode resourcemanager nodemanager mrappmaster jobhistoryserver; do
+        echo "$module.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31"
+        echo "$module.sink.ganglia.period=10"
+        echo "$module.sink.ganglia.supportsparse=true"
+        echo "$module.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both"
+        echo "$module.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40"
+        echo "$module.sink.ganglia.servers=$GANGLIA_HOST:8649"
+    done > /etc/hadoop/hadoop-metrics2.properties
+fi
+
+function wait_for_it()
+{
+    local serviceport=$1
+    local service=${serviceport%%:*}
+    local port=${serviceport#*:}
+    local retry_seconds=5
+    local max_try=100
+    let i=1
+
+    nc -z $service $port
+    result=$?
+
+    until [ $result -eq 0 ]; do
+      echo "[$i/$max_try] check for ${service}:${port}..."
+      echo "[$i/$max_try] ${service}:${port} is not available yet"
+      if (( $i == $max_try )); then
+        echo "[$i/$max_try] ${service}:${port} is still not available; giving up after ${max_try} tries. :/"
+        exit 1
+      fi
+
+      echo "[$i/$max_try] try in ${retry_seconds}s once again ..."
+      let "i++"
+      sleep $retry_seconds
+
+      nc -z $service $port
+      result=$?
+    done
+    echo "[$i/$max_try] $service:${port} is available."
+}
+
+for i in ${SERVICE_PRECONDITION[@]}
+do
+    wait_for_it ${i}
+done
+
+exec "$@"
diff --git a/docker/dockerfile/cluster/client/Dockerfile b/docker/dockerfile/cluster/client/Dockerfile
new file mode 100644
index 0000000..38cbbac
--- /dev/null
+++ b/docker/dockerfile/cluster/client/Dockerfile
@@ -0,0 +1,157 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+ARG JAVA_VERSION=jdk1.8.0_141
+ARG HADOOP_VERSION=2.8.5
+ARG HIVE_VERSION=1.2.1
+ARG HBASE_VERSION=1.1.2
+ARG ZOOKEEPER_VERSION=3.4.10
+ARG KAFKA_VERSION=2.0.0
+ARG SPARK_VERSION=2.3.1
+ARG SPARK_HADOOP_VERSION=2.6
+
+FROM apachekylin/kylin-hive:hive_${HIVE_VERSION}_hadoop_${HADOOP_VERSION} AS hive
+ARG JAVA_VERSION
+ARG HADOOP_VERSION
+ARG HIVE_VERSION
+
+ARG HBASE_VERSION=1.1.2
+FROM apachekylin/kylin-hbase-master:hbase_${HBASE_VERSION} AS hbase
+ARG HBASE_VERSION
+
+
+ARG ZOOKEEPER_VERSION=3.4.10
+FROM zookeeper:${ZOOKEEPER_VERSION} AS zk
+ARG ZOOKEEPER_VERSION
+
+ARG KAFKA_VERSION=2.0.0
+FROM bitnami/kafka:${KAFKA_VERSION} AS kafka
+ARG KAFKA_VERSION
+
+FROM centos:7.3.1611
+MAINTAINER kylin
+USER root
+
+ARG JAVA_VERSION=jdk1.8.0_141
+ARG HADOOP_VERSION=2.8.5
+ARG HIVE_VERSION=1.2.1
+ARG HBASE_VERSION=1.1.2
+ARG ZOOKEEPER_VERSION=3.4.10
+ARG KAFKA_VERSION=2.0.0
+ARG SPARK_VERSION=2.3.1
+ARG SPARK_HADOOP_VERSION=2.6
+
+ENV JAVA_VERSION ${JAVA_VERSION}
+ENV HADOOP_VERSION ${HADOOP_VERSION}
+ENV HIVE_VERSION ${HIVE_VERSION}
+ENV HBASE_VERSION ${HBASE_VERSION}
+ENV ZOOKEEPER_VERSION ${ZOOKEEPER_VERSION}
+ENV KAFKA_VERSION ${KAFKA_VERSION}
+ENV SPARK_VERSION ${SPARK_VERSION}
+ENV SPARK_HADOOP_VERSION ${SPARK_HADOOP_VERSION}
+
+## install tools
+RUN yum -y install lsof wget tar git unzip curl net-tools procps perl sed nc which
+# install kerberos
+RUN yum -y install krb5-server krb5-libs krb5-auth-dialog krb5-workstation
+
+RUN mkdir /opt/hadoop-$HADOOP_VERSION/
+
+COPY --from=hive /opt/jdk1.8.0_141/ /opt/jdk1.8.0_141/
+COPY --from=hive /opt/hadoop-$HADOOP_VERSION/ /opt/hadoop-$HADOOP_VERSION/
+COPY --from=hive /opt/hive/ /opt/hive/
+COPY --from=hive /opt/entrypoint/hadoop/entrypoint.sh /opt/entrypoint/hadoop/entrypoint.sh
+RUN chmod a+x /opt/entrypoint/hadoop/entrypoint.sh
+COPY --from=hive /opt/entrypoint/hive/entrypoint.sh /opt/entrypoint/hive/entrypoint.sh
+RUN chmod a+x /opt/entrypoint/hive/entrypoint.sh
+
+
+COPY --from=hbase /opt/hbase-$HBASE_VERSION/ /opt/hbase-$HBASE_VERSION/
+COPY --from=hbase /opt/entrypoint/hbase/entrypoint.sh /opt/entrypoint/hbase/entrypoint.sh
+RUN chmod a+x /opt/entrypoint/hbase/entrypoint.sh
+
+
+COPY --from=zk /zookeeper-${ZOOKEEPER_VERSION}/ /opt/zookeeper-${ZOOKEEPER_VERSION}/
+COPY --from=zk /docker-entrypoint.sh /opt/entrypoint/zookeeper/entrypoint.sh
+RUN chmod a+x /opt/entrypoint/zookeeper/entrypoint.sh
+
+COPY --from=kafka /opt/bitnami/kafka /opt/kafka
+COPY --from=kafka /app-entrypoint.sh /opt/entrypoint/kafka/entrypoint.sh
+RUN chmod a+x /opt/entrypoint/kafka/entrypoint.sh
+
+
+RUN set -x \
+    && ln -s /opt/hadoop-$HADOOP_VERSION/etc/hadoop /etc/hadoop \
+    && cp /etc/hadoop/mapred-site.xml.template /etc/hadoop/mapred-site.xml \
+    && mkdir -p /opt/hadoop-$HADOOP_VERSION/logs
+
+RUN ln -s /opt/hbase-$HBASE_VERSION/conf /etc/hbase
+
+
+ENV JAVA_HOME=/opt/${JAVA_VERSION}
+
+ENV HADOOP_PREFIX=/opt/hadoop-$HADOOP_VERSION
+ENV HADOOP_CONF_DIR=/etc/hadoop
+ENV HADOOP_HOME=${HADOOP_PREFIX}
+ENV HADOOP_INSTALL=${HADOOP_HOME}
+
+ENV HIVE_HOME=/opt/hive
+
+ENV HBASE_PREFIX=/opt/hbase-$HBASE_VERSION
+ENV HBASE_CONF_DIR=/etc/hbase
+ENV HBASE_HOME=${HBASE_PREFIX}
+
+
+ENV ZK_HOME=/opt/zookeeper-${ZOOKEEPER_VERSION}
+ENV ZOOCFGDIR=$ZK_HOME/conf
+ENV ZOO_USER=zookeeper
+ENV ZOO_CONF_DIR=$ZK_HOME/conf ZOO_PORT=2181 ZOO_TICK_TIME=2000 ZOO_INIT_LIMIT=5 ZOO_SYNC_LIMIT=2 ZOO_MAX_CLIENT_CNXNS=60
+
+ENV SPARK_URL=https://archive.apache.org/dist/spark/spark-$SPARK_VERSION/spark-$SPARK_VERSION-bin-hadoop${SPARK_HADOOP_VERSION}.tgz
+ENV SPARK_HOME=/opt/spark-$SPARK_VERSION-bin-hadoop${SPARK_HADOOP_VERSION}
+ENV SPARK_CONF_DIR=/opt/spark-$SPARK_VERSION-bin-hadoop${SPARK_HADOOP_VERSION}/conf
+
+RUN curl -fSL "${SPARK_URL}" -o /tmp/spark.tar.gz \
+    && tar -zxvf /tmp/spark.tar.gz -C /opt/ \
+    && rm -f /tmp/spark.tar.gz \
+    && cp $HIVE_HOME/conf/hive-site.xml $SPARK_HOME/conf \
+    && cp $SPARK_HOME/yarn/*.jar $HADOOP_HOME/share/hadoop/yarn/lib
+
+#COPY spark-$SPARK_VERSION-bin-hadoop${SPARK_HADOOP_VERSION}.tgz /tmp/spark.tar.gz
+#RUN tar -zxvf /tmp/spark.tar.gz -C /opt/ \
+#    && rm -f /tmp/spark.tar.gz \
+#    && cp $HIVE_HOME/conf/hive-site.xml $SPARK_HOME/conf \
+#    && cp $SPARK_HOME/yarn/*.jar $HADOOP_HOME/share/hadoop/yarn/lib
+
+#RUN cp $HIVE_HOME/lib/mysql-connector-java.jar $SPARK_HOME/jars
+RUN cp $HIVE_HOME/lib/postgresql-jdbc.jar  $SPARK_HOME/jars
+RUN cp $HBASE_HOME/lib/hbase-protocol-${HBASE_VERSION}.jar $SPARK_HOME/jars
+RUN echo spark.sql.catalogImplementation=hive > $SPARK_HOME/conf/spark-defaults.conf
+
+
+ENV PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HIVE_HOME/bin:$HBASE_HOME/bin:$ZK_HOME/bin
+
+# Set up client-side configuration for all components
+COPY entrypoint.sh /opt/entrypoint/client/entrypoint.sh
+RUN chmod a+x /opt/entrypoint/client/entrypoint.sh
+
+COPY run_cli.sh /run_cli.sh
+RUN chmod a+x  /run_cli.sh
+
+#ENTRYPOINT ["/opt/entrypoint/client/entrypoint.sh"]
+
+CMD ["/run_cli.sh"]
diff --git a/docker/conf/hadoop/core-site.xml b/docker/dockerfile/cluster/client/conf/hadoop-read/core-site.xml
similarity index 63%
copy from docker/conf/hadoop/core-site.xml
copy to docker/dockerfile/cluster/client/conf/hadoop-read/core-site.xml
index 6fe6404..69fc462 100644
--- a/docker/conf/hadoop/core-site.xml
+++ b/docker/dockerfile/cluster/client/conf/hadoop-read/core-site.xml
@@ -17,13 +17,9 @@
 <!-- Put site-specific property overrides in this file. -->
 
 <configuration>
-    <property>
-        <name>hadoop.tmp.dir</name>
-        <value>/data/hadoop</value>
-        <description>Abase for other temporary directories.</description>
-    </property>
-    <property>
-        <name>fs.defaultFS</name>
-        <value>hdfs://localhost:9000</value>
-    </property>
+<property><name>hadoop.proxyuser.hue.hosts</name><value>*</value></property>
+<property><name>fs.defaultFS</name><value>hdfs://write-namenode:8020</value></property>
+<property><name>io.compression.codecs</name><value>org.apache.hadoop.io.compress.SnappyCodec</value></property>
+<property><name>hadoop.proxyuser.hue.groups</name><value>*</value></property>
+<property><name>hadoop.http.staticuser.user</name><value>root</value></property>
 </configuration>
diff --git a/docker/dockerfile/cluster/client/conf/hadoop-read/hdfs-site.xml b/docker/dockerfile/cluster/client/conf/hadoop-read/hdfs-site.xml
new file mode 100644
index 0000000..cdf7778
--- /dev/null
+++ b/docker/dockerfile/cluster/client/conf/hadoop-read/hdfs-site.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<property><name>dfs.namenode.name.dir</name><value>file:///hadoop/dfs/name</value></property>
+<property><name>dfs.namenode.datanode.registration.ip-hostname-check</name><value>false</value></property>
+<property><name>dfs.permissions.enabled</name><value>false</value></property>
+<property><name>dfs.webhdfs.enabled</name><value>true</value></property>
+<property><name>dfs.namenode.rpc-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.namenode.servicerpc-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.namenode.http-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.namenode.https-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.client.use.datanode.hostname</name><value>true</value></property>
+<property><name>dfs.datanode.use.datanode.hostname</name><value>true</value></property>
+</configuration>
diff --git a/docker/conf/hadoop/core-site.xml b/docker/dockerfile/cluster/client/conf/hadoop-read/mapred-site.xml
similarity index 69%
copy from docker/conf/hadoop/core-site.xml
copy to docker/dockerfile/cluster/client/conf/hadoop-read/mapred-site.xml
index 6fe6404..d5cc450 100644
--- a/docker/conf/hadoop/core-site.xml
+++ b/docker/dockerfile/cluster/client/conf/hadoop-read/mapred-site.xml
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="UTF-8"?>
+<?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 <!--
   Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,13 +17,6 @@
 <!-- Put site-specific property overrides in this file. -->
 
 <configuration>
-    <property>
-        <name>hadoop.tmp.dir</name>
-        <value>/data/hadoop</value>
-        <description>Abase for other temporary directories.</description>
-    </property>
-    <property>
-        <name>fs.defaultFS</name>
-        <value>hdfs://localhost:9000</value>
-    </property>
+
+<property><name>yarn.nodemanager.bind-host</name><value>0.0.0.0</value></property>
 </configuration>
diff --git a/docker/dockerfile/cluster/client/conf/hadoop-read/yarn-site.xml b/docker/dockerfile/cluster/client/conf/hadoop-read/yarn-site.xml
new file mode 100644
index 0000000..392cf4c
--- /dev/null
+++ b/docker/dockerfile/cluster/client/conf/hadoop-read/yarn-site.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<configuration>
+
+<!-- Site specific YARN configuration properties -->
+
+<property><name>yarn.resourcemanager.fs.state-store.uri</name><value>/rmstate</value></property>
+<property><name>yarn.timeline-service.generic-application-history.enabled</name><value>true</value></property>
+<property><name>mapreduce.map.output.compress</name><value>true</value></property>
+<property><name>yarn.resourcemanager.recovery.enabled</name><value>true</value></property>
+<property><name>mapred.map.output.compress.codec</name><value>org.apache.hadoop.io.compress.SnappyCodec</value></property>
+<property><name>yarn.timeline-service.enabled</name><value>true</value></property>
+<property><name>yarn.log-aggregation-enable</name><value>true</value></property>
+<property><name>yarn.resourcemanager.store.class</name><value>org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore</value></property>
+<property><name>yarn.resourcemanager.system-metrics-publisher.enabled</name><value>true</value></property>
+<property><name>yarn.nodemanager.remote-app-log-dir</name><value>/app-logs</value></property>
+<property><name>yarn.nodemanager.aux-services</name><value>mapreduce_shuffle</value></property>
+<property><name>yarn.resourcemanager.resource-tracker.address</name><value>read-resourcemanager:8031</value></property>
+<property><name>yarn.resourcemanager.hostname</name><value>read-resourcemanager</value></property>
+<property><name>yarn.scheduler.capacity.root.default.maximum-allocation-vcores</name><value>4</value></property>
+<property><name>yarn.timeline-service.hostname</name><value>read-historyserver</value></property>
+<property><name>yarn.scheduler.capacity.root.default.maximum-allocation-mb</name><value>8192</value></property>
+<property><name>yarn.log.server.url</name><value>http://read-historyserver:8188/applicationhistory/logs/</value></property>
+<property><name>yarn.resourcemanager.scheduler.class</name><value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value></property>
+<property><name>yarn.resourcemanager.scheduler.address</name><value>read-resourcemanager:8030</value></property>
+<property><name>yarn.resourcemanager.address</name><value>read-resourcemanager:8032</value></property>
+<property><name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name><value>98.5</value></property>
+<property><name>yarn.nodemanager.resource.memory-mb</name><value>16384</value></property>
+<property><name>yarn.nodemanager.resource.cpu-vcores</name><value>8</value></property>
+<property><name>yarn.resourcemanager.bind-host</name><value>0.0.0.0</value></property>
+<property><name>yarn.nodemanager.bind-host</name><value>0.0.0.0</value></property>
+<property><name>yarn.timeline-service.bind-host</name><value>0.0.0.0</value></property>
+</configuration>
diff --git a/docker/conf/hadoop/core-site.xml b/docker/dockerfile/cluster/client/conf/hadoop-write/core-site.xml
similarity index 63%
copy from docker/conf/hadoop/core-site.xml
copy to docker/dockerfile/cluster/client/conf/hadoop-write/core-site.xml
index 6fe6404..69fc462 100644
--- a/docker/conf/hadoop/core-site.xml
+++ b/docker/dockerfile/cluster/client/conf/hadoop-write/core-site.xml
@@ -17,13 +17,9 @@
 <!-- Put site-specific property overrides in this file. -->
 
 <configuration>
-    <property>
-        <name>hadoop.tmp.dir</name>
-        <value>/data/hadoop</value>
-        <description>Abase for other temporary directories.</description>
-    </property>
-    <property>
-        <name>fs.defaultFS</name>
-        <value>hdfs://localhost:9000</value>
-    </property>
+<property><name>hadoop.proxyuser.hue.hosts</name><value>*</value></property>
+<property><name>fs.defaultFS</name><value>hdfs://write-namenode:8020</value></property>
+<property><name>io.compression.codecs</name><value>org.apache.hadoop.io.compress.SnappyCodec</value></property>
+<property><name>hadoop.proxyuser.hue.groups</name><value>*</value></property>
+<property><name>hadoop.http.staticuser.user</name><value>root</value></property>
 </configuration>
diff --git a/docker/dockerfile/cluster/client/conf/hadoop-write/hdfs-site.xml b/docker/dockerfile/cluster/client/conf/hadoop-write/hdfs-site.xml
new file mode 100644
index 0000000..cdf7778
--- /dev/null
+++ b/docker/dockerfile/cluster/client/conf/hadoop-write/hdfs-site.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<property><name>dfs.namenode.name.dir</name><value>file:///hadoop/dfs/name</value></property>
+<property><name>dfs.namenode.datanode.registration.ip-hostname-check</name><value>false</value></property>
+<property><name>dfs.permissions.enabled</name><value>false</value></property>
+<property><name>dfs.webhdfs.enabled</name><value>true</value></property>
+<property><name>dfs.namenode.rpc-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.namenode.servicerpc-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.namenode.http-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.namenode.https-bind-host</name><value>0.0.0.0</value></property>
+<property><name>dfs.client.use.datanode.hostname</name><value>true</value></property>
+<property><name>dfs.datanode.use.datanode.hostname</name><value>true</value></property>
+</configuration>
diff --git a/docker/conf/hadoop/core-site.xml b/docker/dockerfile/cluster/client/conf/hadoop-write/mapred-site.xml
similarity index 69%
copy from docker/conf/hadoop/core-site.xml
copy to docker/dockerfile/cluster/client/conf/hadoop-write/mapred-site.xml
index 6fe6404..d5cc450 100644
--- a/docker/conf/hadoop/core-site.xml
+++ b/docker/dockerfile/cluster/client/conf/hadoop-write/mapred-site.xml
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="UTF-8"?>
+<?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 <!--
   Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,13 +17,6 @@
 <!-- Put site-specific property overrides in this file. -->
 
 <configuration>
-    <property>
-        <name>hadoop.tmp.dir</name>
-        <value>/data/hadoop</value>
-        <description>Abase for other temporary directories.</description>
-    </property>
-    <property>
-        <name>fs.defaultFS</name>
-        <value>hdfs://localhost:9000</value>
-    </property>
+
+<property><name>yarn.nodemanager.bind-host</name><value>0.0.0.0</value></property>
 </configuration>
diff --git a/docker/dockerfile/cluster/client/conf/hadoop-write/yarn-site.xml b/docker/dockerfile/cluster/client/conf/hadoop-write/yarn-site.xml
new file mode 100644
index 0000000..b55dd34
--- /dev/null
+++ b/docker/dockerfile/cluster/client/conf/hadoop-write/yarn-site.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<configuration>
+
+<!-- Site specific YARN configuration properties -->
+
+<property><name>yarn.resourcemanager.fs.state-store.uri</name><value>/rmstate</value></property>
+<property><name>yarn.timeline-service.generic-application-history.enabled</name><value>true</value></property>
+<property><name>mapreduce.map.output.compress</name><value>true</value></property>
+<property><name>yarn.resourcemanager.recovery.enabled</name><value>true</value></property>
+<property><name>mapred.map.output.compress.codec</name><value>org.apache.hadoop.io.compress.SnappyCodec</value></property>
+<property><name>yarn.timeline-service.enabled</name><value>true</value></property>
+<property><name>yarn.log-aggregation-enable</name><value>true</value></property>
+<property><name>yarn.resourcemanager.store.class</name><value>org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore</value></property>
+<property><name>yarn.resourcemanager.system-metrics-publisher.enabled</name><value>true</value></property>
+<property><name>yarn.nodemanager.remote-app-log-dir</name><value>/app-logs</value></property>
+<property><name>yarn.nodemanager.aux-services</name><value>mapreduce_shuffle</value></property>
+<property><name>yarn.resourcemanager.resource_tracker.address</name><value>write-resourcemanager:8031</value></property>
+<property><name>yarn.resourcemanager.hostname</name><value>write-resourcemanager</value></property>
+<property><name>yarn.scheduler.capacity.root.default.maximum-allocation-vcores</name><value>4</value></property>
+<property><name>yarn.timeline-service.hostname</name><value>write-historyserver</value></property>
+<property><name>yarn.scheduler.capacity.root.default.maximum-allocation-mb</name><value>8192</value></property>
+<property><name>yarn.log.server.url</name><value>http://write-historyserver:8188/applicationhistory/logs/</value></property>
+<property><name>yarn.resourcemanager.scheduler.class</name><value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value></property>
+<property><name>yarn.resourcemanager.scheduler.address</name><value>write-resourcemanager:8030</value></property>
+<property><name>yarn.resourcemanager.address</name><value>write-resourcemanager:8032</value></property>
+<property><name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name><value>98.5</value></property>
+<property><name>yarn.nodemanager.resource.memory-mb</name><value>16384</value></property>
+<property><name>yarn.nodemanager.resource.cpu-vcores</name><value>8</value></property>
+<property><name>yarn.resourcemanager.bind-host</name><value>0.0.0.0</value></property>
+<property><name>yarn.nodemanager.bind-host</name><value>0.0.0.0</value></property>
+<property><name>yarn.timeline-service.bind-host</name><value>0.0.0.0</value></property>
+</configuration>
diff --git a/docker/dockerfile/cluster/client/conf/hbase/hbase-site.xml b/docker/dockerfile/cluster/client/conf/hbase/hbase-site.xml
new file mode 100644
index 0000000..988d91c
--- /dev/null
+++ b/docker/dockerfile/cluster/client/conf/hbase/hbase-site.xml
@@ -0,0 +1,34 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+<property><name>hbase.zookeeper.quorum</name><value>read-zookeeper</value></property>
+<property><name>hbase.master</name><value>read-hbase-master:16000</value></property>
+<property><name>hbase.regionserver.port</name><value>16020</value></property>
+<property><name>hbase.regionserver.info.port</name><value>16030</value></property>
+<property><name>DIR</name><value>/etc/hbase</value></property>
+<property><name>hbase.cluster.distributed</name><value>true</value></property>
+<property><name>hbase.rootdir</name><value>hdfs://read-namenode:8020/hbase</value></property>
+<property><name>hbase.master.info.port</name><value>16010</value></property>
+<property><name>hbase.master.hostname</name><value>read-hbase-master</value></property>
+<property><name>hbase.master.port</name><value>16000</value></property>
+</configuration>
diff --git a/docker/dockerfile/cluster/client/conf/hive/hive-site.xml b/docker/dockerfile/cluster/client/conf/hive/hive-site.xml
new file mode 100644
index 0000000..c60fe36
--- /dev/null
+++ b/docker/dockerfile/cluster/client/conf/hive/hive-site.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?><!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+--><configuration>
+    <property><name>hive.metastore.uris</name><value>thrift://write-hive-metastore:9083</value></property>
+    <property><name>datanucleus.autoCreateSchema</name><value>false</value></property>
+    <property><name>javax.jdo.option.ConnectionURL</name><value>jdbc:postgresql://write-hive-metastore-postgresql/metastore</value></property>
+    <property><name>javax.jdo.option.ConnectionDriverName</name><value>org.postgresql.Driver</value></property>
+    <property><name>javax.jdo.option.ConnectionPassword</name><value>hive</value></property>
+    <property><name>javax.jdo.option.ConnectionUserName</name><value>hive</value></property>
+</configuration>
+
diff --git a/docker/dockerfile/cluster/client/entrypoint.sh b/docker/dockerfile/cluster/client/entrypoint.sh
new file mode 100644
index 0000000..dddc072
--- /dev/null
+++ b/docker/dockerfile/cluster/client/entrypoint.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+/opt/entrypoint/hadoop/entrypoint.sh
+/opt/entrypoint/hive/entrypoint.sh
+/opt/entrypoint/hbase/entrypoint.sh
+#/opt/entrypoint/zookeeper/entrypoint.sh
+#/opt/entrypoint/kafka/entrypoint.sh
diff --git a/docker/dockerfile/cluster/client/run_cli.sh b/docker/dockerfile/cluster/client/run_cli.sh
new file mode 100644
index 0000000..371c3e1
--- /dev/null
+++ b/docker/dockerfile/cluster/client/run_cli.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+/opt/entrypoint/hadoop/entrypoint.sh
+/opt/entrypoint/hive/entrypoint.sh
+/opt/entrypoint/hbase/entrypoint.sh
+
+while :
+do
+    sleep 1000
+done
\ No newline at end of file
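Since run_cli.sh does nothing beyond waiting, the client container is meant to be used interactively once its entrypoints have configured Hadoop, Hive, and HBase. A minimal sketch of such a session, assuming the compose service is named kylin-client (the name is an assumption, not taken from this commit):

    docker exec -it kylin-client hive -e "show databases;"
    docker exec -it kylin-client hbase shell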
diff --git a/docker/build_image.sh b/docker/dockerfile/cluster/datanode/Dockerfile
old mode 100755
new mode 100644
similarity index 70%
copy from docker/build_image.sh
copy to docker/dockerfile/cluster/datanode/Dockerfile
index 9c0b925..54bbc10
--- a/docker/build_image.sh
+++ b/docker/dockerfile/cluster/datanode/Dockerfile
@@ -1,5 +1,3 @@
-#!/usr/bin/env bash
-
 #
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with
@@ -17,11 +15,17 @@
 # limitations under the License.
 #
 
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-cd ${DIR}
-echo "build image in dir "${DIR}
+ARG HADOOP_VERSION=2.8.5
+FROM apachekylin/kylin-hadoop-base:hadoop_${HADOOP_VERSION}
+
+ARG HADOOP_DN_PORT=50075
+ENV HADOOP_DN_PORT ${HADOOP_DN_PORT}
+
+ENV HDFS_CONF_dfs_datanode_data_dir=file:///hadoop/dfs/data
+RUN mkdir -p /hadoop/dfs/data
+VOLUME /hadoop/dfs/data
 
+ADD run_dn.sh /run_dn.sh
+RUN chmod a+x /run_dn.sh
 
-echo "start build Hadoop docker image"
-docker build -f Dockerfile_hadoop -t hadoop2.7-all-in-one-for-kylin4 .
-docker build -f Dockerfile -t apachekylin/apache-kylin-standalone:4.0.0-alpha .
+CMD ["/run_dn.sh"]
diff --git a/docker/run_container.sh b/docker/dockerfile/cluster/datanode/run_dn.sh
old mode 100755
new mode 100644
similarity index 76%
copy from docker/run_container.sh
copy to docker/dockerfile/cluster/datanode/run_dn.sh
index 3ed32ce..f3208ef
--- a/docker/run_container.sh
+++ b/docker/dockerfile/cluster/datanode/run_dn.sh
@@ -1,3 +1,5 @@
+#!/bin/bash
+
 #
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with
@@ -15,12 +17,10 @@
 # limitations under the License.
 #
 
-docker run -d \
--m 8G \
--p 7070:7070 \
--p 8088:8088 \
--p 50070:50070 \
--p 8032:8032 \
--p 8042:8042 \
--p 2181:2181 \
-apachekylin/apache-kylin-standalone:4.0.0-alpha
+datadir=`echo $HDFS_CONF_dfs_datanode_data_dir | perl -pe 's#file://##'`
+if [ ! -d $datadir ]; then
+  echo "Datanode data directory not found: $datadir"
+  exit 2
+fi
+
+$HADOOP_PREFIX/bin/hdfs --config $HADOOP_CONF_DIR datanode
diff --git a/docker/dockerfile/cluster/hbase/Dockerfile b/docker/dockerfile/cluster/hbase/Dockerfile
new file mode 100644
index 0000000..9b92d56
--- /dev/null
+++ b/docker/dockerfile/cluster/hbase/Dockerfile
@@ -0,0 +1,59 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+FROM centos:7.3.1611
+MAINTAINER kylin
+USER root
+
+ARG JAVA_VERSION=jdk1.8.0_141
+ARG HBASE_VERSION=1.1.2
+ARG HBASE_URL=https://archive.apache.org/dist/hbase/$HBASE_VERSION/hbase-$HBASE_VERSION-bin.tar.gz
+
+ENV JAVA_HOME /opt/${JAVA_VERSION}
+ENV HBASE_VERSION ${HBASE_VERSION}
+ENV HBASE_URL ${HBASE_URL}
+
+# install tools
+RUN yum -y install lsof wget tar git unzip wget curl net-tools procps perl sed nc which
+
+# setup jdk
+RUN wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" "http://download.oracle.com/otn-pub/java/jdk/8u141-b15/336fa29ff2bb4ef291e347e091f7f4a7/jdk-8u141-linux-x64.tar.gz" -P /opt \
+    && tar -zxvf /opt/jdk-8u141-linux-x64.tar.gz -C /opt/ \
+    && rm -f /opt/jdk-8u141-linux-x64.tar.gz
+
+RUN set -x \
+    && curl -fSL "$HBASE_URL" -o /tmp/hbase.tar.gz \
+    && curl -fSL "$HBASE_URL.asc" -o /tmp/hbase.tar.gz.asc \
+    && tar -xvf /tmp/hbase.tar.gz -C /opt/ \
+    && rm /tmp/hbase.tar.gz*
+
+RUN ln -s /opt/hbase-$HBASE_VERSION/conf /etc/hbase
+RUN mkdir /opt/hbase-$HBASE_VERSION/logs
+
+RUN mkdir /hadoop-data
+
+ENV HBASE_PREFIX=/opt/hbase-$HBASE_VERSION
+ENV HBASE_HOME=${HBASE_PREFIX}
+ENV HBASE_CONF_DIR=/etc/hbase
+
+ENV USER=root
+ENV PATH $JAVA_HOME/bin:$HBASE_PREFIX/bin/:$PATH
+
+ADD entrypoint.sh /opt/entrypoint/hbase/entrypoint.sh
+RUN chmod a+x /opt/entrypoint/hbase/entrypoint.sh
+
+ENTRYPOINT ["/opt/entrypoint/hbase/entrypoint.sh"]
diff --git a/docker/dockerfile/cluster/hbase/entrypoint.sh b/docker/dockerfile/cluster/hbase/entrypoint.sh
new file mode 100644
index 0000000..5aea8d9
--- /dev/null
+++ b/docker/dockerfile/cluster/hbase/entrypoint.sh
@@ -0,0 +1,83 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+function addProperty() {
+  local path=$1
+  local name=$2
+  local value=$3
+
+  local entry="<property><name>$name</name><value>${value}</value></property>"
+  local escapedEntry=$(echo $entry | sed 's/\//\\\//g')
+  sed -i "/<\/configuration>/ s/.*/${escapedEntry}\n&/" $path
+}
+
+function configure() {
+    local path=$1
+    local module=$2
+    local envPrefix=$3
+
+    local var
+    local value
+
+    echo "Configuring $module"
+    for c in `printenv | perl -sne 'print "$1 " if m/^${envPrefix}_(.+?)=.*/' -- -envPrefix=$envPrefix`; do
+        name=`echo ${c} | perl -pe 's/___/-/g; s/__/_/g; s/_/./g'`
+        var="${envPrefix}_${c}"
+        value=${!var}
+        echo " - Setting $name=$value"
+        addProperty /etc/hbase/$module-site.xml $name "$value"
+    done
+}
+
+configure /etc/hbase/hbase-site.xml hbase HBASE_CONF
+
+function wait_for_it()
+{
+    local serviceport=$1
+    local service=${serviceport%%:*}
+    local port=${serviceport#*:}
+    local retry_seconds=5
+    local max_try=100
+    let i=1
+
+    nc -z $service $port
+    result=$?
+
+    until [ $result -eq 0 ]; do
+      echo "[$i/$max_try] check for ${service}:${port}..."
+      echo "[$i/$max_try] ${service}:${port} is not available yet"
+      if (( $i == $max_try )); then
+        echo "[$i/$max_try] ${service}:${port} is still not available; giving up after ${max_try} tries. :/"
+        exit 1
+      fi
+
+      echo "[$i/$max_try] try in ${retry_seconds}s once again ..."
+      let "i++"
+      sleep $retry_seconds
+
+      nc -z $service $port
+      result=$?
+    done
+    echo "[$i/$max_try] $service:${port} is available."
+}
+
+for i in "${SERVICE_PRECONDITION[@]}"
+do
+    wait_for_it ${i}
+done
+
+exec "$@"
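The configure/addProperty pair above is what lets the compose env files drive hbase-site.xml: each variable carrying the given prefix is mapped to a property name through the perl rules (___ becomes -, __ becomes _, _ becomes .). An illustrative mapping, using a value consistent with the configs in this commit:

    # environment variable set on the container:
    HBASE_CONF_hbase_zookeeper_quorum=read-zookeeper
    # line appended to /etc/hbase/hbase-site.xml by addProperty:
    # <property><name>hbase.zookeeper.quorum</name><value>read-zookeeper</value></property>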
diff --git a/docker/build_image.sh b/docker/dockerfile/cluster/historyserver/Dockerfile
old mode 100755
new mode 100644
similarity index 60%
copy from docker/build_image.sh
copy to docker/dockerfile/cluster/historyserver/Dockerfile
index 9c0b925..2adda43
--- a/docker/build_image.sh
+++ b/docker/dockerfile/cluster/historyserver/Dockerfile
@@ -1,5 +1,3 @@
-#!/usr/bin/env bash
-
 #
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with
@@ -17,11 +15,20 @@
 # limitations under the License.
 #
 
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-cd ${DIR}
-echo "build image in dir "${DIR}
+ARG HADOOP_VERSION=2.8.5
+FROM apachekylin/kylin-hadoop-base:hadoop_${HADOOP_VERSION}
+
+ARG HADOOP_HISTORY_PORT=8188
+ENV HADOOP_HISTORY_PORT ${HADOOP_HISTORY_PORT}
+EXPOSE ${HADOOP_HISTORY_PORT}
+
+HEALTHCHECK CMD curl -f http://localhost:${HADOOP_HISTORY_PORT}/ || exit 1
+
+ENV YARN_CONF_yarn_timeline___service_leveldb___timeline___store_path=/hadoop/yarn/timeline
+RUN mkdir -p /hadoop/yarn/timeline
+VOLUME /hadoop/yarn/timeline
 
+ADD run_history.sh /run_history.sh
+RUN chmod a+x /run_history.sh
 
-echo "start build Hadoop docker image"
-docker build -f Dockerfile_hadoop -t hadoop2.7-all-in-one-for-kylin4 .
-docker build -f Dockerfile -t apachekylin/apache-kylin-standalone:4.0.0-alpha .
+CMD ["/run_history.sh"]
diff --git a/docker/run_container.sh b/docker/dockerfile/cluster/historyserver/run_history.sh
old mode 100755
new mode 100644
similarity index 82%
copy from docker/run_container.sh
copy to docker/dockerfile/cluster/historyserver/run_history.sh
index 3ed32ce..6d7ae4e
--- a/docker/run_container.sh
+++ b/docker/dockerfile/cluster/historyserver/run_history.sh
@@ -1,3 +1,5 @@
+#!/bin/bash
+
 #
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with
@@ -15,12 +17,4 @@
 # limitations under the License.
 #
 
-docker run -d \
--m 8G \
--p 7070:7070 \
--p 8088:8088 \
--p 50070:50070 \
--p 8032:8032 \
--p 8042:8042 \
--p 2181:2181 \
-apachekylin/apache-kylin-standalone:4.0.0-alpha
+$HADOOP_PREFIX/bin/yarn --config $HADOOP_CONF_DIR historyserver
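Since the historyserver Dockerfile health-checks port ${HADOOP_HISTORY_PORT} (8188 by default) and yarn.log.server.url in the yarn-site files points at the same port, a manual liveness probe can reuse that endpoint:

    curl -f http://read-historyserver:8188/applicationhistory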
diff --git a/docker/dockerfile/cluster/hive/Dockerfile b/docker/dockerfile/cluster/hive/Dockerfile
new file mode 100644
index 0000000..46f81f4
--- /dev/null
+++ b/docker/dockerfile/cluster/hive/Dockerfile
@@ -0,0 +1,73 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+ARG HADOOP_VERSION=2.8.5
+FROM apachekylin/kylin-hadoop-base:hadoop_${HADOOP_VERSION}
+ARG HADOOP_VERSION
+ENV HIVE_HOME /opt/hive
+ENV HADOOP_HOME /opt/hadoop-$HADOOP_VERSION
+
+WORKDIR /opt
+
+ARG HIVE_VERSION=1.2.1
+ARG HIVE_URL=https://archive.apache.org/dist/hive/hive-$HIVE_VERSION/apache-hive-$HIVE_VERSION-bin.tar.gz
+ENV HIVE_VERSION ${HIVE_VERSION}
+ENV HIVE_URL ${HIVE_URL}
+
+ARG MYSQL_CONN_VERSION=8.0.20
+ENV MYSQL_CONN_VERSION=${MYSQL_CONN_VERSION}
+ARG MYSQL_CONN_URL=https://downloads.mysql.com/archives/get/p/3/file/mysql-connector-java-${MYSQL_CONN_VERSION}.tar.gz
+ENV MYSQL_CONN_URL=${MYSQL_CONN_URL}
+
+# install tools
+RUN yum -y install lsof wget tar git unzip wget curl net-tools procps perl sed nc which
+
+#Install Hive MySQL, PostgreSQL JDBC
+RUN echo "Hive URL is :${HIVE_URL}" \
+    && wget ${HIVE_URL} -O hive.tar.gz \
+    && tar -xzvf hive.tar.gz \
+    && mv *hive*-bin hive \
+    && wget $MYSQL_CONN_URL -O /tmp/mysql-connector-java.tar.gz \
+    && tar -xzvf /tmp/mysql-connector-java.tar.gz -C /tmp/ \
+    && cp /tmp/mysql-connector-java-${MYSQL_CONN_VERSION}/mysql-connector-java-${MYSQL_CONN_VERSION}.jar $HIVE_HOME/lib/mysql-connector-java.jar \
+    && rm /tmp/mysql-connector-java.tar.gz \
+    && rm -rf /tmp/mysql-connector-java-${MYSQL_CONN_VERSION} \
+    && wget https://jdbc.postgresql.org/download/postgresql-9.4.1212.jar -O $HIVE_HOME/lib/postgresql-jdbc.jar \
+    && rm hive.tar.gz
+
+#Custom configuration goes here
+ADD conf/hive-site.xml $HIVE_HOME/conf
+ADD conf/beeline-log4j2.properties $HIVE_HOME/conf
+ADD conf/hive-env.sh $HIVE_HOME/conf
+ADD conf/hive-exec-log4j2.properties $HIVE_HOME/conf
+ADD conf/hive-log4j2.properties $HIVE_HOME/conf
+ADD conf/ivysettings.xml $HIVE_HOME/conf
+ADD conf/llap-daemon-log4j2.properties $HIVE_HOME/conf
+
+COPY run_hv.sh /run_hv.sh
+RUN chmod +x /run_hv.sh
+
+COPY entrypoint.sh /opt/entrypoint/hive/entrypoint.sh
+RUN chmod +x /opt/entrypoint/hive/entrypoint.sh
+
+ENV PATH $HIVE_HOME/bin/:$PATH
+
+EXPOSE 10000
+EXPOSE 10002
+
+ENTRYPOINT ["/opt/entrypoint/hive/entrypoint.sh"]
+CMD ["/run_hv.sh"]
diff --git a/docker/dockerfile/cluster/hive/conf/beeline-log4j2.properties b/docker/dockerfile/cluster/hive/conf/beeline-log4j2.properties
new file mode 100644
index 0000000..d1305f8
--- /dev/null
+++ b/docker/dockerfile/cluster/hive/conf/beeline-log4j2.properties
@@ -0,0 +1,46 @@
+
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+# limitations under the License.
+
+status = INFO
+name = BeelineLog4j2
+packages = org.apache.hadoop.hive.ql.log
+
+# list of properties
+property.hive.log.level = WARN
+property.hive.root.logger = console
+
+# list of all appenders
+appenders = console
+
+# console appender
+appender.console.type = Console
+appender.console.name = console
+appender.console.target = SYSTEM_ERR
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = %d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n
+
+# list of all loggers
+loggers = HiveConnection
+
+# HiveConnection logs useful info for dynamic service discovery
+logger.HiveConnection.name = org.apache.hive.jdbc.HiveConnection
+logger.HiveConnection.level = INFO
+
+# root logger
+rootLogger.level = ${sys:hive.log.level}
+rootLogger.appenderRefs = root
+rootLogger.appenderRef.root.ref = ${sys:hive.root.logger}
diff --git a/docker/dockerfile/cluster/hive/conf/hive-env.sh b/docker/dockerfile/cluster/hive/conf/hive-env.sh
new file mode 100644
index 0000000..f22407c
--- /dev/null
+++ b/docker/dockerfile/cluster/hive/conf/hive-env.sh
@@ -0,0 +1,55 @@
+
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Hive and Hadoop environment variables here. These variables can be used
+# to control the execution of Hive. It should be used by admins to configure
+# the Hive installation (so that users do not have to set environment variables
+# or set command line parameters to get correct behavior).
+#
+# The hive service being invoked (CLI/HWI etc.) is available via the environment
+# variable SERVICE
+
+
+# Hive Client memory usage can be an issue if a large number of clients
+# are running at the same time. The flags below have been useful in 
+# reducing memory usage:
+#
+# if [ "$SERVICE" = "cli" ]; then
+#   if [ -z "$DEBUG" ]; then
+#     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+#   else
+#     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+#   fi
+# fi
+
+# The heap size of the JVM started by the hive shell script can be controlled via:
+#
+# export HADOOP_HEAPSIZE=1024
+#
+# A larger heap size may be required when running queries over a large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB).  A larger heap size would also be
+# appropriate for hive server (hwi etc).
+
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+# HADOOP_HOME=${bin}/../../hadoop
+
+# Hive Configuration Directory can be controlled by:
+# export HIVE_CONF_DIR=
+
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+# export HIVE_AUX_JARS_PATH=
diff --git a/docker/dockerfile/cluster/hive/conf/hive-exec-log4j2.properties b/docker/dockerfile/cluster/hive/conf/hive-exec-log4j2.properties
new file mode 100644
index 0000000..a1e50eb
--- /dev/null
+++ b/docker/dockerfile/cluster/hive/conf/hive-exec-log4j2.properties
@@ -0,0 +1,67 @@
+
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+# limitations under the License.
+
+status = INFO
+name = HiveExecLog4j2
+packages = org.apache.hadoop.hive.ql.log
+
+# list of properties
+property.hive.log.level = INFO
+property.hive.root.logger = FA
+property.hive.query.id = hadoop
+property.hive.log.dir = ${sys:java.io.tmpdir}/${sys:user.name}
+property.hive.log.file = ${sys:hive.query.id}.log
+
+# list of all appenders
+appenders = console, FA
+
+# console appender
+appender.console.type = Console
+appender.console.name = console
+appender.console.target = SYSTEM_ERR
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = %d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n
+
+# simple file appender
+appender.FA.type = File
+appender.FA.name = FA
+appender.FA.fileName = ${sys:hive.log.dir}/${sys:hive.log.file}
+appender.FA.layout.type = PatternLayout
+appender.FA.layout.pattern = %d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n
+
+# list of all loggers
+loggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX
+
+logger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn
+logger.NIOServerCnxn.level = WARN
+
+logger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO
+logger.ClientCnxnSocketNIO.level = WARN
+
+logger.DataNucleus.name = DataNucleus
+logger.DataNucleus.level = ERROR
+
+logger.Datastore.name = Datastore
+logger.Datastore.level = ERROR
+
+logger.JPOX.name = JPOX
+logger.JPOX.level = ERROR
+
+# root logger
+rootLogger.level = ${sys:hive.log.level}
+rootLogger.appenderRefs = root
+rootLogger.appenderRef.root.ref = ${sys:hive.root.logger}
diff --git a/docker/dockerfile/cluster/hive/conf/hive-log4j2.properties b/docker/dockerfile/cluster/hive/conf/hive-log4j2.properties
new file mode 100644
index 0000000..5e5ce02
--- /dev/null
+++ b/docker/dockerfile/cluster/hive/conf/hive-log4j2.properties
@@ -0,0 +1,74 @@
+
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+# limitations under the License.
+
+status = INFO
+name = HiveLog4j2
+packages = org.apache.hadoop.hive.ql.log
+
+# list of properties
+property.hive.log.level = INFO
+property.hive.root.logger = DRFA
+property.hive.log.dir = ${sys:java.io.tmpdir}/${sys:user.name}
+property.hive.log.file = hive.log
+
+# list of all appenders
+appenders = console, DRFA
+
+# console appender
+appender.console.type = Console
+appender.console.name = console
+appender.console.target = SYSTEM_ERR
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = %d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n
+
+# daily rolling file appender
+appender.DRFA.type = RollingFile
+appender.DRFA.name = DRFA
+appender.DRFA.fileName = ${sys:hive.log.dir}/${sys:hive.log.file}
+# Use %pid in the filePattern to append <process-id>@<host-name> to the filename if you want separate log files for different CLI session
+appender.DRFA.filePattern = ${sys:hive.log.dir}/${sys:hive.log.file}.%d{yyyy-MM-dd}
+appender.DRFA.layout.type = PatternLayout
+appender.DRFA.layout.pattern = %d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n
+appender.DRFA.policies.type = Policies
+appender.DRFA.policies.time.type = TimeBasedTriggeringPolicy
+appender.DRFA.policies.time.interval = 1
+appender.DRFA.policies.time.modulate = true
+appender.DRFA.strategy.type = DefaultRolloverStrategy
+appender.DRFA.strategy.max = 30
+
+# list of all loggers
+loggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX
+
+logger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn
+logger.NIOServerCnxn.level = WARN
+
+logger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO
+logger.ClientCnxnSocketNIO.level = WARN
+
+logger.DataNucleus.name = DataNucleus
+logger.DataNucleus.level = ERROR
+
+logger.Datastore.name = Datastore
+logger.Datastore.level = ERROR
+
+logger.JPOX.name = JPOX
+logger.JPOX.level = ERROR
+
+# root logger
+rootLogger.level = ${sys:hive.log.level}
+rootLogger.appenderRefs = root
+rootLogger.appenderRef.root.ref = ${sys:hive.root.logger}
diff --git a/docker/dockerfile/cluster/hive/conf/hive-site.xml b/docker/dockerfile/cluster/hive/conf/hive-site.xml
new file mode 100644
index 0000000..60f3935
--- /dev/null
+++ b/docker/dockerfile/cluster/hive/conf/hive-site.xml
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?><!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+--><configuration>
+</configuration>
diff --git a/docker/dockerfile/cluster/hive/conf/ivysettings.xml b/docker/dockerfile/cluster/hive/conf/ivysettings.xml
new file mode 100644
index 0000000..d1b7819
--- /dev/null
+++ b/docker/dockerfile/cluster/hive/conf/ivysettings.xml
@@ -0,0 +1,44 @@
+<!--This file is used by grapes to download dependencies from a maven repository.
+    This is just a template and can be edited to add more repositories.
+-->
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<ivysettings>
+  <!--name of the defaultResolver should always be 'downloadGrapes'. -->
+  <settings defaultResolver="downloadGrapes"/>
+  <!-- Only set maven.local.repository if not already set -->
+  <property name="maven.local.repository" value="${user.home}/.m2/repository" override="false" />
+  <property name="m2-pattern"
+            value="file:${maven.local.repository}/[organisation]/[module]/[revision]/[module]-[revision](-[classifier]).[ext]"
+            override="false"/>
+  <resolvers>
+    <!-- more resolvers can be added here -->
+    <chain name="downloadGrapes">
+      <!-- This resolver uses ibiblio to find artifacts, compatible with maven2 repository -->
+      <ibiblio name="central" m2compatible="true"/>
+      <url name="local-maven2" m2compatible="true">
+        <artifact pattern="${m2-pattern}"/>
+      </url>
+      <!-- File resolver to add jars from the local system. -->
+      <filesystem name="test" checkmodified="true">
+        <artifact pattern="/tmp/[module]-[revision](-[classifier]).jar"/>
+      </filesystem>
+
+    </chain>
+  </resolvers>
+</ivysettings>
diff --git a/docker/dockerfile/cluster/hive/conf/llap-daemon-log4j2.properties b/docker/dockerfile/cluster/hive/conf/llap-daemon-log4j2.properties
new file mode 100644
index 0000000..f1b72eb
--- /dev/null
+++ b/docker/dockerfile/cluster/hive/conf/llap-daemon-log4j2.properties
@@ -0,0 +1,94 @@
+
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+# limitations under the License.
+
+status = INFO
+name = LlapDaemonLog4j2
+packages = org.apache.hadoop.hive.ql.log
+
+# list of properties
+property.llap.daemon.log.level = INFO
+property.llap.daemon.root.logger = console
+property.llap.daemon.log.dir = .
+property.llap.daemon.log.file = llapdaemon.log
+property.llap.daemon.historylog.file = llapdaemon_history.log
+property.llap.daemon.log.maxfilesize = 256MB
+property.llap.daemon.log.maxbackupindex = 20
+
+# list of all appenders
+appenders = console, RFA, HISTORYAPPENDER
+
+# console appender
+appender.console.type = Console
+appender.console.name = console
+appender.console.target = SYSTEM_ERR
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = %d{yy/MM/dd HH:mm:ss} [%t%x] %p %c{2} : %m%n
+
+# rolling file appender
+appender.RFA.type = RollingFile
+appender.RFA.name = RFA
+appender.RFA.fileName = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.log.file}
+appender.RFA.filePattern = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.log.file}_%i
+appender.RFA.layout.type = PatternLayout
+appender.RFA.layout.pattern = %d{ISO8601} %-5p [%t%x]: %c{2} (%F:%M(%L)) - %m%n
+appender.RFA.policies.type = Policies
+appender.RFA.policies.size.type = SizeBasedTriggeringPolicy
+appender.RFA.policies.size.size = ${sys:llap.daemon.log.maxfilesize}
+appender.RFA.strategy.type = DefaultRolloverStrategy
+appender.RFA.strategy.max = ${sys:llap.daemon.log.maxbackupindex}
+
+# history file appender
+appender.HISTORYAPPENDER.type = RollingFile
+appender.HISTORYAPPENDER.name = HISTORYAPPENDER
+appender.HISTORYAPPENDER.fileName = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.historylog.file}
+appender.HISTORYAPPENDER.filePattern = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.historylog.file}_%i
+appender.HISTORYAPPENDER.layout.type = PatternLayout
+appender.HISTORYAPPENDER.layout.pattern = %m%n
+appender.HISTORYAPPENDER.policies.type = Policies
+appender.HISTORYAPPENDER.policies.size.type = SizeBasedTriggeringPolicy
+appender.HISTORYAPPENDER.policies.size.size = ${sys:llap.daemon.log.maxfilesize}
+appender.HISTORYAPPENDER.strategy.type = DefaultRolloverStrategy
+appender.HISTORYAPPENDER.strategy.max = ${sys:llap.daemon.log.maxbackupindex}
+
+# list of all loggers
+loggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX, HistoryLogger
+
+logger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn
+logger.NIOServerCnxn.level = WARN
+
+logger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO
+logger.ClientCnxnSocketNIO.level = WARN
+
+logger.DataNucleus.name = DataNucleus
+logger.DataNucleus.level = ERROR
+
+logger.Datastore.name = Datastore
+logger.Datastore.level = ERROR
+
+logger.JPOX.name = JPOX
+logger.JPOX.level = ERROR
+
+logger.HistoryLogger.name = org.apache.hadoop.hive.llap.daemon.HistoryLogger
+logger.HistoryLogger.level = INFO
+logger.HistoryLogger.additivity = false
+logger.HistoryLogger.appenderRefs = HistoryAppender
+logger.HistoryLogger.appenderRef.HistoryAppender.ref = HISTORYAPPENDER
+
+# root logger
+rootLogger.level = ${sys:llap.daemon.log.level}
+rootLogger.appenderRefs = root
+rootLogger.appenderRef.root.ref = ${sys:llap.daemon.root.logger}
diff --git a/docker/dockerfile/cluster/hive/entrypoint.sh b/docker/dockerfile/cluster/hive/entrypoint.sh
new file mode 100644
index 0000000..d6a888c
--- /dev/null
+++ b/docker/dockerfile/cluster/hive/entrypoint.sh
@@ -0,0 +1,135 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Set some sensible defaults
+export CORE_CONF_fs_defaultFS=${CORE_CONF_fs_defaultFS:-hdfs://`hostname -f`:8020}
+
+function addProperty() {
+  local path=$1
+  local name=$2
+  local value=$3
+
+  local entry="<property><name>$name</name><value>${value}</value></property>"
+  local escapedEntry=$(echo $entry | sed 's/\//\\\//g')
+  sed -i "/<\/configuration>/ s/.*/${escapedEntry}\n&/" $path
+}
+
+function configure() {
+    local path=$1
+    local module=$2
+    local envPrefix=$3
+
+    local var
+    local value
+    
+    echo "Configuring $module"
+    for c in `printenv | perl -sne 'print "$1 " if m/^${envPrefix}_(.+?)=.*/' -- -envPrefix=$envPrefix`; do 
+        name=`echo ${c} | perl -pe 's/___/-/g; s/__/_/g; s/_/./g'`
+        var="${envPrefix}_${c}"
+        value=${!var}
+        echo " - Setting $name=$value"
+        addProperty $path $name "$value"
+    done
+}
+
+configure /etc/hadoop/core-site.xml core CORE_CONF
+configure /etc/hadoop/hdfs-site.xml hdfs HDFS_CONF
+configure /etc/hadoop/yarn-site.xml yarn YARN_CONF
+configure /etc/hadoop/httpfs-site.xml httpfs HTTPFS_CONF
+configure /etc/hadoop/kms-site.xml kms KMS_CONF
+configure /etc/hadoop/mapred-site.xml mapred MAPRED_CONF
+configure /etc/hadoop/hive-site.xml hive HIVE_SITE_CONF
+configure /opt/hive/conf/hive-site.xml hive HIVE_SITE_CONF
+
+if [ "$MULTIHOMED_NETWORK" = "1" ]; then
+    echo "Configuring for multihomed network"
+
+    # HDFS
+    addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.rpc-bind-host 0.0.0.0
+    addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.servicerpc-bind-host 0.0.0.0
+    addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.http-bind-host 0.0.0.0
+    addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.https-bind-host 0.0.0.0
+    addProperty /etc/hadoop/hdfs-site.xml dfs.client.use.datanode.hostname true
+    addProperty /etc/hadoop/hdfs-site.xml dfs.datanode.use.datanode.hostname true
+
+    # YARN
+    addProperty /etc/hadoop/yarn-site.xml yarn.resourcemanager.bind-host 0.0.0.0
+    addProperty /etc/hadoop/yarn-site.xml yarn.nodemanager.bind-host 0.0.0.0
+    addProperty /etc/hadoop/yarn-site.xml yarn.timeline-service.bind-host 0.0.0.0
+
+    # MAPRED
+    addProperty /etc/hadoop/mapred-site.xml yarn.nodemanager.bind-host 0.0.0.0
+fi
+
+if [ -n "$GANGLIA_HOST" ]; then
+    mv /etc/hadoop/hadoop-metrics.properties /etc/hadoop/hadoop-metrics.properties.orig
+    mv /etc/hadoop/hadoop-metrics2.properties /etc/hadoop/hadoop-metrics2.properties.orig
+
+    for module in mapred jvm rpc ugi; do
+        echo "$module.class=org.apache.hadoop.metrics.ganglia.GangliaContext31"
+        echo "$module.period=10"
+        echo "$module.servers=$GANGLIA_HOST:8649"
+    done > /etc/hadoop/hadoop-metrics.properties
+    
+    for module in namenode datanode resourcemanager nodemanager mrappmaster jobhistoryserver; do
+        echo "$module.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31"
+        echo "$module.sink.ganglia.period=10"
+        echo "$module.sink.ganglia.supportsparse=true"
+        echo "$module.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both"
+        echo "$module.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40"
+        echo "$module.sink.ganglia.servers=$GANGLIA_HOST:8649"
+    done > /etc/hadoop/hadoop-metrics2.properties
+fi
+
+function wait_for_it()
+{
+    local serviceport=$1
+    local service=${serviceport%%:*}
+    local port=${serviceport#*:}
+    local retry_seconds=5
+    local max_try=100
+    let i=1
+
+    nc -z $service $port
+    result=$?
+
+    until [ $result -eq 0 ]; do
+      echo "[$i/$max_try] check for ${service}:${port}..."
+      echo "[$i/$max_try] ${service}:${port} is not available yet"
+      if (( $i == $max_try )); then
+        echo "[$i/$max_try] ${service}:${port} is still not available; giving up after ${max_try} tries. :/"
+        exit 1
+      fi
+      
+      echo "[$i/$max_try] try in ${retry_seconds}s once again ..."
+      let "i++"
+      sleep $retry_seconds
+
+      nc -z $service $port
+      result=$?
+    done
+    echo "[$i/$max_try] $service:${port} is available."
+}
+
+for i in "${SERVICE_PRECONDITION[@]}"
+do
+    wait_for_it ${i}
+done
+
+exec "$@"
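wait_for_it makes the container block until every host:port pair listed in SERVICE_PRECONDITION answers on TCP. A hedged example of how a dependent service might declare its preconditions in an env file (the exact list per service is defined elsewhere in the compose files):

    SERVICE_PRECONDITION="write-namenode:50070 write-datanode:50075 write-hive-metastore:9083"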
diff --git a/docker/run_container.sh b/docker/dockerfile/cluster/hive/run_hv.sh
old mode 100755
new mode 100644
similarity index 77%
copy from docker/run_container.sh
copy to docker/dockerfile/cluster/hive/run_hv.sh
index 3ed32ce..675937f
--- a/docker/run_container.sh
+++ b/docker/dockerfile/cluster/hive/run_hv.sh
@@ -1,3 +1,5 @@
+#!/bin/bash
+
 #
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with
@@ -15,12 +17,10 @@
 # limitations under the License.
 #
 
-docker run -d \
--m 8G \
--p 7070:7070 \
--p 8088:8088 \
--p 50070:50070 \
--p 8032:8032 \
--p 8042:8042 \
--p 2181:2181 \
-apachekylin/apache-kylin-standalone:4.0.0-alpha
+hadoop fs -mkdir -p    /tmp
+hadoop fs -mkdir -p    /user/hive/warehouse
+hadoop fs -chmod g+w   /tmp
+hadoop fs -chmod g+w   /user/hive/warehouse
+
+cd $HIVE_HOME/bin
+./hiveserver2 --hiveconf hive.server2.enable.doAs=false
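With hiveserver2 listening on its default port 10000 (exposed by the hive Dockerfile above), a quick smoke test could use beeline; the hostname write-hive-server is an assumption:

    beeline -u jdbc:hive2://write-hive-server:10000 -e "show databases;"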
diff --git a/docker/dockerfile/cluster/hmaster/Dockerfile b/docker/dockerfile/cluster/hmaster/Dockerfile
new file mode 100644
index 0000000..09aa0e3
--- /dev/null
+++ b/docker/dockerfile/cluster/hmaster/Dockerfile
@@ -0,0 +1,13 @@
+
+
+ARG HBASE_VERSION=1.1.2
+
+FROM apachekylin/kylin-hbase-base:hbase_${HBASE_VERSION}
+ARG HBASE_VERSION=1.1.2
+ENV HBASE_VERSION ${HBASE_VERSION}
+COPY run_hm.sh /run_hm.sh
+RUN chmod +x /run_hm.sh
+
+EXPOSE 16000 16010
+
+CMD ["/run_hm.sh"]
diff --git a/docker/run_container.sh b/docker/dockerfile/cluster/hmaster/run_hm.sh
old mode 100755
new mode 100644
similarity index 82%
copy from docker/run_container.sh
copy to docker/dockerfile/cluster/hmaster/run_hm.sh
index 3ed32ce..1b1cda5
--- a/docker/run_container.sh
+++ b/docker/dockerfile/cluster/hmaster/run_hm.sh
@@ -1,3 +1,4 @@
+#!/bin/bash
 #
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with
@@ -14,13 +15,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-
-docker run -d \
--m 8G \
--p 7070:7070 \
--p 8088:8088 \
--p 50070:50070 \
--p 8032:8032 \
--p 8042:8042 \
--p 2181:2181 \
-apachekylin/apache-kylin-standalone:4.0.0-alpha
+/opt/hbase-$HBASE_VERSION/bin/hbase master start
diff --git a/docker/dockerfile/cluster/hregionserver/Dockerfile b/docker/dockerfile/cluster/hregionserver/Dockerfile
new file mode 100644
index 0000000..aaced16
--- /dev/null
+++ b/docker/dockerfile/cluster/hregionserver/Dockerfile
@@ -0,0 +1,12 @@
+ARG HBASE_VERSION=1.1.2
+
+FROM apachekylin/kylin-hbase-base:hbase_${HBASE_VERSION}
+ARG HBASE_VERSION=1.1.2
+ENV HBASE_VERSION ${HBASE_VERSION}
+
+COPY run_hr.sh /run_hr.sh
+RUN chmod +x /run_hr.sh
+
+EXPOSE 16020 16030
+
+CMD ["/run_hr.sh"]
diff --git a/docker/run_container.sh b/docker/dockerfile/cluster/hregionserver/run_hr.sh
old mode 100755
new mode 100644
similarity index 82%
copy from docker/run_container.sh
copy to docker/dockerfile/cluster/hregionserver/run_hr.sh
index 3ed32ce..953ad43
--- a/docker/run_container.sh
+++ b/docker/dockerfile/cluster/hregionserver/run_hr.sh
@@ -1,3 +1,4 @@
+#!/bin/bash
 #
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with
@@ -14,13 +15,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-
-docker run -d \
--m 8G \
--p 7070:7070 \
--p 8088:8088 \
--p 50070:50070 \
--p 8032:8032 \
--p 8042:8042 \
--p 2181:2181 \
-apachekylin/apache-kylin-standalone:4.0.0-alpha
+/opt/hbase-$HBASE_VERSION/bin/hbase regionserver start
diff --git a/docker/build_image.sh b/docker/dockerfile/cluster/kerberos/Dockerfile
old mode 100755
new mode 100644
similarity index 63%
copy from docker/build_image.sh
copy to docker/dockerfile/cluster/kerberos/Dockerfile
index 9c0b925..bc46f23
--- a/docker/build_image.sh
+++ b/docker/dockerfile/cluster/kerberos/Dockerfile
@@ -1,5 +1,3 @@
-#!/usr/bin/env bash
-
 #
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with
@@ -17,11 +15,21 @@
 # limitations under the License.
 #
 
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-cd ${DIR}
-echo "build image in dir "${DIR}
+FROM centos:7.3.1611
+MAINTAINER kylin
+
+USER root
+
+# install tools
+RUN yum -y install lsof wget tar git unzip wget curl net-tools procps perl sed nc which
+# install kerberos
+RUN yum -y install krb5-server krb5-libs krb5-auth-dialog krb5-workstation
+
+COPY conf/kadm5.acl  /var/kerberos/krb5kdc/kadm5.acl
+COPY conf/kdc.conf /var/kerberos/krb5kdc/kdc.conf
+COPY conf/krb5.conf /etc/krb5.conf
 
+ADD run_krb.sh /run_krb.sh
+RUN chmod a+x /run_krb.sh
 
-echo "start build Hadoop docker image"
-docker build -f Dockerfile_hadoop -t hadoop2.7-all-in-one-for-kylin4 .
-docker build -f Dockerfile -t apachekylin/apache-kylin-standalone:4.0.0-alpha .
+CMD ["/run_krb.sh"]
\ No newline at end of file
diff --git a/docker/dockerfile/cluster/kerberos/conf/kadm5.acl b/docker/dockerfile/cluster/kerberos/conf/kadm5.acl
new file mode 100644
index 0000000..47c8885
--- /dev/null
+++ b/docker/dockerfile/cluster/kerberos/conf/kadm5.acl
@@ -0,0 +1 @@
+*/kylin@KYLIN.COM	*
\ No newline at end of file
diff --git a/docker/build_image.sh b/docker/dockerfile/cluster/kerberos/conf/kdc.conf
old mode 100755
new mode 100644
similarity index 64%
copy from docker/build_image.sh
copy to docker/dockerfile/cluster/kerberos/conf/kdc.conf
index 9c0b925..aa3e6b6
--- a/docker/build_image.sh
+++ b/docker/dockerfile/cluster/kerberos/conf/kdc.conf
@@ -1,5 +1,3 @@
-#!/usr/bin/env bash
-
 #
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with
@@ -17,11 +15,15 @@
 # limitations under the License.
 #
 
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-cd ${DIR}
-echo "build image in dir "${DIR}
-
+[kdcdefaults]
+kdc_ports = 88
+kdc_tcp_ports = 88
 
-echo "start build Hadoop docker image"
-docker build -f Dockerfile_hadoop -t hadoop2.7-all-in-one-for-kylin4 .
-docker build -f Dockerfile -t apachekylin/apache-kylin-standalone:4.0.0-alpha .
+[realms]
+KYLIN.COM = {
+ #master_key_type = aes256-cts
+ acl_file = /var/kerberos/krb5kdc/kadm5.acl
+ dict_file = /usr/share/dict/words
+ admin_keytab = /var/kerberos/krb5kdc/kadm5.keytab
+ supported_enctypes = aes256-cts:normal aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal camellia256-cts:normal camellia128-cts:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal
+}
\ No newline at end of file
diff --git a/docker/build_image.sh b/docker/dockerfile/cluster/kerberos/conf/krb5.conf
old mode 100755
new mode 100644
similarity index 59%
copy from docker/build_image.sh
copy to docker/dockerfile/cluster/kerberos/conf/krb5.conf
index 9c0b925..2f50c9c
--- a/docker/build_image.sh
+++ b/docker/dockerfile/cluster/kerberos/conf/krb5.conf
@@ -1,5 +1,3 @@
-#!/usr/bin/env bash
-
 #
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with
@@ -17,11 +15,29 @@
 # limitations under the License.
 #
 
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-cd ${DIR}
-echo "build image in dir "${DIR}
+includedir /etc/krb5.conf.d/
+
+[logging]
+ default = FILE:/var/log/krb5libs.log
+ kdc = FILE:/var/log/krb5kdc.log
+ admin_server = FILE:/var/log/kadmind.log
+
+[libdefaults]
+ dns_lookup_realm = false
+ ticket_lifetime = 24h
+ renew_lifetime = 7d
+ forwardable = true
+ rdns = false
+ pkinit_anchors = /etc/pki/tls/certs/ca-bundle.crt
+ default_realm = KYLIN.COM
+ default_ccache_name = KEYRING:persistent:%{uid}
 
+[realms]
+ KYLIN.COM = {
+  kdc = host-203
+  admin_server = host-203
+ }
 
-echo "start build Hadoop docker image"
-docker build -f Dockerfile_hadoop -t hadoop2.7-all-in-one-for-kylin4 .
-docker build -f Dockerfile -t apachekylin/apache-kylin-standalone:4.0.0-alpha .
+[domain_realm]
+ .ctyun.com = KYLIN.COM
+ ctyun.com = KYLIN.COM
\ No newline at end of file
diff --git a/docker/run_container.sh b/docker/dockerfile/cluster/kerberos/run_krb.sh
old mode 100755
new mode 100644
similarity index 82%
copy from docker/run_container.sh
copy to docker/dockerfile/cluster/kerberos/run_krb.sh
index 3ed32ce..a2a0ab8
--- a/docker/run_container.sh
+++ b/docker/dockerfile/cluster/kerberos/run_krb.sh
@@ -1,3 +1,4 @@
+#!/bin/bash
 #
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with
@@ -15,12 +16,13 @@
 # limitations under the License.
 #
 
-docker run -d \
--m 8G \
--p 7070:7070 \
--p 8088:8088 \
--p 50070:50070 \
--p 8032:8032 \
--p 8042:8042 \
--p 2181:2181 \
-apachekylin/apache-kylin-standalone:4.0.0-alpha
+# the master password passed via -P is an assumption, added so creation is non-interactive
+kdb5_util create -s -r KYLIN.COM -P kylin
+# systemd is not available inside a plain centos container, so start the daemons directly
+krb5kdc
+kadmind
+
+while :
+do
+    sleep 1000
+done
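
Note that kdb5_util prompts interactively for a master password unless one is supplied (the -P value above is an assumption), and systemctl normally requires systemd as PID 1, which most containers do not run. A hedged alternative that skips systemd entirely:

    # after kdb5_util create, start the daemons directly instead of via systemctl
    krb5kdc
    kadmind
    tail -f /dev/null   # keep the container alive
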
diff --git a/docker/run_container.sh b/docker/dockerfile/cluster/kylin/Dockerfile
old mode 100755
new mode 100644
similarity index 75%
copy from docker/run_container.sh
copy to docker/dockerfile/cluster/kylin/Dockerfile
index 3ed32ce..2bd4a1b
--- a/docker/run_container.sh
+++ b/docker/dockerfile/cluster/kylin/Dockerfile
@@ -15,12 +15,11 @@
 # limitations under the License.
 #
 
-docker run -d \
--m 8G \
--p 7070:7070 \
--p 8088:8088 \
--p 50070:50070 \
--p 8032:8032 \
--p 8042:8042 \
--p 2181:2181 \
-apachekylin/apache-kylin-standalone:4.0.0-alpha
+ARG HADOOP_VERSION=2.8.5
+ARG HIVE_VERSION=1.2.1
+ARG HBASE_VERSION=1.1.2
+ARG SPARK_VERSION=2.3.3
+
+FROM apachekylin/kylin-client:hadoop_${HADOOP_VERSION}_hive_${HIVE_VERSION}_hbase_${HBASE_VERSION}_spark_${SPARK_VERSION} AS client
+
+#CMD ["/bin/bash"]
\ No newline at end of file
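
Building this image with explicit component versions would look roughly like the following (the output tag is illustrative, not part of the commit):

    docker build \
      --build-arg HADOOP_VERSION=2.8.5 \
      --build-arg HIVE_VERSION=1.2.1 \
      --build-arg HBASE_VERSION=1.1.2 \
      --build-arg SPARK_VERSION=2.3.3 \
      -t apachekylin/kylin-cluster-kylin:test docker/dockerfile/cluster/kylin
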
diff --git a/docker/dockerfile/cluster/kylin/entrypoint.sh b/docker/dockerfile/cluster/kylin/entrypoint.sh
new file mode 100644
index 0000000..7a693aa
--- /dev/null
+++ b/docker/dockerfile/cluster/kylin/entrypoint.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+
diff --git a/docker/dockerfile/cluster/metastore-db/Dockerfile b/docker/dockerfile/cluster/metastore-db/Dockerfile
new file mode 100644
index 0000000..8a78964
--- /dev/null
+++ b/docker/dockerfile/cluster/metastore-db/Dockerfile
@@ -0,0 +1,12 @@
+ARG MYSQL_VERSION=5.6.49
+FROM mysql:${MYSQL_VERSION}
+
+ARG CREATE_DBS="kylin hive"
+ENV CREATE_DBS=$CREATE_DBS
+
+COPY run_db.sh /run_db.sh
+RUN chmod +x /run_db.sh
+
+ENTRYPOINT ["docker-entrypoint.sh"]
+
+CMD ["/run_db.sh"]
diff --git a/docker/dockerfile/cluster/metastore-db/run_db.sh b/docker/dockerfile/cluster/metastore-db/run_db.sh
new file mode 100644
index 0000000..dfaaef1
--- /dev/null
+++ b/docker/dockerfile/cluster/metastore-db/run_db.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+mysqld --user=root &
+# wait until the server accepts connections before provisioning
+until mysqladmin ping --silent; do sleep 1; done
+mysqladmin -uroot password kylin
+mysql -uroot -pkylin -e "grant all privileges on *.* to root@'%' identified by 'kylin' WITH GRANT OPTION; FLUSH PRIVILEGES;"
+for db in $CREATE_DBS; do
+  mysql -uroot -pkylin -e "create database $db;"
+done
+
+while :
+do
+    sleep 10
+done
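
Once the container is up, the provisioning can be verified from the host (the container name is hypothetical):

    docker exec metastore-db mysql -uroot -pkylin -e "SHOW DATABASES;"
    # expect the kylin and hive databases in the output
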
diff --git a/docker/build_image.sh b/docker/dockerfile/cluster/namenode/Dockerfile
old mode 100755
new mode 100644
similarity index 61%
copy from docker/build_image.sh
copy to docker/dockerfile/cluster/namenode/Dockerfile
index 9c0b925..3418680
--- a/docker/build_image.sh
+++ b/docker/dockerfile/cluster/namenode/Dockerfile
@@ -1,5 +1,3 @@
-#!/usr/bin/env bash
-
 #
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with
@@ -17,11 +15,22 @@
 # limitations under the License.
 #
 
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-cd ${DIR}
-echo "build image in dir "${DIR}
+ARG HADOOP_VERSION=2.8.5
+FROM apachekylin/kylin-hadoop-base:hadoop_${HADOOP_VERSION}
+
+ENV HADOOP_VERSION ${HADOOP_VERSION}
+
+ARG HADOOP_WEBHDFS_PORT=50070
+ENV HADOOP_WEBHDFS_PORT ${HADOOP_WEBHDFS_PORT}
+EXPOSE ${HADOOP_WEBHDFS_PORT} 8020
+
+HEALTHCHECK CMD curl -f http://localhost:${HADOOP_WEBHDFS_PORT}/ || exit 1
+
+ENV HDFS_CONF_dfs_namenode_name_dir=file:///hadoop/dfs/name
+RUN mkdir -p /hadoop/dfs/name
+VOLUME /hadoop/dfs/name
 
+ADD run_nn.sh /run_nn.sh
+RUN chmod a+x /run_nn.sh
 
-echo "start build Hadoop docker image"
-docker build -f Dockerfile_hadoop -t hadoop2.7-all-in-one-for-kylin4 .
-docker build -f Dockerfile -t apachekylin/apache-kylin-standalone:4.0.0-alpha .
+CMD ["/run_nn.sh"]
diff --git a/docker/build_image.sh b/docker/dockerfile/cluster/namenode/run_nn.sh
old mode 100755
new mode 100644
similarity index 61%
rename from docker/build_image.sh
rename to docker/dockerfile/cluster/namenode/run_nn.sh
index 9c0b925..e4dc90f
--- a/docker/build_image.sh
+++ b/docker/dockerfile/cluster/namenode/run_nn.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/env bash
+#!/bin/bash
 
 #
 # Licensed to the Apache Software Foundation (ASF) under one or more
@@ -17,11 +17,20 @@
 # limitations under the License.
 #
 
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-cd ${DIR}
-echo "build image in dir "${DIR}
+namedir=`echo $HDFS_CONF_dfs_namenode_name_dir | perl -pe 's#file://##'`
+if [ ! -d $namedir ]; then
+  echo "Namenode name directory not found: $namedir"
+  exit 2
+fi
 
+if [ -z "$CLUSTER_NAME" ]; then
+  echo "Cluster name not specified"
+  exit 2
+fi
 
-echo "start build Hadoop docker image"
-docker build -f Dockerfile_hadoop -t hadoop2.7-all-in-one-for-kylin4 .
-docker build -f Dockerfile -t apachekylin/apache-kylin-standalone:4.0.0-alpha .
+if [ "`ls -A $namedir`" == "" ]; then
+  echo "Formatting namenode name directory: $namedir"
+  $HADOOP_PREFIX/bin/hdfs --config $HADOOP_CONF_DIR namenode -format $CLUSTER_NAME
+fi
+
+$HADOOP_PREFIX/bin/hdfs --config $HADOOP_CONF_DIR namenode
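
run_nn.sh refuses to start without CLUSTER_NAME and formats the name directory only when it is empty, so a minimal standalone run looks like this (the image tag and volume name are assumptions):

    docker run -d -e CLUSTER_NAME=kylin-test \
      -v nn-data:/hadoop/dfs/name -p 50070:50070 \
      apachekylin/kylin-namenode:test
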
diff --git a/docker/run_container.sh b/docker/dockerfile/cluster/nodemanager/Dockerfile
old mode 100755
new mode 100644
similarity index 76%
copy from docker/run_container.sh
copy to docker/dockerfile/cluster/nodemanager/Dockerfile
index 3ed32ce..8ec68df
--- a/docker/run_container.sh
+++ b/docker/dockerfile/cluster/nodemanager/Dockerfile
@@ -15,12 +15,15 @@
 # limitations under the License.
 #
 
-docker run -d \
--m 8G \
--p 7070:7070 \
--p 8088:8088 \
--p 50070:50070 \
--p 8032:8032 \
--p 8042:8042 \
--p 2181:2181 \
-apachekylin/apache-kylin-standalone:4.0.0-alpha
+ARG HADOOP_VERSION=2.8.5
+FROM apachekylin/kylin-hadoop-base:hadoop_${HADOOP_VERSION}
+
+MAINTAINER kylin
+
+EXPOSE 8042
+HEALTHCHECK CMD curl -f http://localhost:8042/ || exit 1
+
+ADD run_nm.sh /run_nm.sh
+RUN chmod a+x /run_nm.sh
+
+CMD ["/run_nm.sh"]
diff --git a/docker/run_container.sh b/docker/dockerfile/cluster/nodemanager/run_nm.sh
old mode 100755
new mode 100644
similarity index 82%
copy from docker/run_container.sh
copy to docker/dockerfile/cluster/nodemanager/run_nm.sh
index 3ed32ce..9a36690
--- a/docker/run_container.sh
+++ b/docker/dockerfile/cluster/nodemanager/run_nm.sh
@@ -1,3 +1,5 @@
+#!/bin/bash
+
 #
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with
@@ -15,12 +17,4 @@
 # limitations under the License.
 #
 
-docker run -d \
--m 8G \
--p 7070:7070 \
--p 8088:8088 \
--p 50070:50070 \
--p 8032:8032 \
--p 8042:8042 \
--p 2181:2181 \
-apachekylin/apache-kylin-standalone:4.0.0-alpha
+$HADOOP_PREFIX/bin/yarn --config $HADOOP_CONF_DIR nodemanager
diff --git a/docker/dockerfile/cluster/pom.xml b/docker/dockerfile/cluster/pom.xml
new file mode 100644
index 0000000..f6640a2
--- /dev/null
+++ b/docker/dockerfile/cluster/pom.xml
@@ -0,0 +1,81 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <parent>
+    <artifactId>hudi</artifactId>
+    <groupId>org.apache.hudi</groupId>
+    <version>0.6.1-SNAPSHOT</version>
+    <relativePath>../../../../pom.xml</relativePath>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+
+  <artifactId>hudi-hadoop-docker</artifactId>
+  <packaging>pom</packaging>
+  <modules>
+    <module>base</module>
+    <module>namenode</module>
+    <module>datanode</module>
+    <module>historyserver</module>
+    <module>hive_base</module>
+    <module>spark_base</module>
+    <module>sparkmaster</module>
+    <module>sparkworker</module>
+    <module>sparkadhoc</module>
+    <module>prestobase</module>
+  </modules>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hudi</groupId>
+      <artifactId>hudi-spark-bundle_${scala.binary.version}</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+  </dependencies>
+
+  <properties>
+    <skipITs>false</skipITs>
+    <docker.build.skip>true</docker.build.skip>
+    <docker.spark.version>2.4.4</docker.spark.version>
+    <docker.hive.version>2.3.3</docker.hive.version>
+    <docker.hadoop.version>2.8.4</docker.hadoop.version>
+    <docker.presto.version>0.217</docker.presto.version>
+    <dockerfile.maven.version>1.4.3</dockerfile.maven.version>
+    <checkstyle.skip>true</checkstyle.skip>
+    <main.basedir>${project.parent.basedir}</main.basedir>
+  </properties>
+
+  <build>
+    <extensions>
+      <extension>
+        <groupId>com.spotify</groupId>
+        <artifactId>dockerfile-maven-extension</artifactId>
+        <version>${dockerfile.maven.version}</version>
+      </extension>
+    </extensions>
+    <plugins>
+     <plugin>
+        <groupId>com.spotify</groupId>
+        <artifactId>dockerfile-maven-plugin</artifactId>
+        <version>${dockerfile.maven.version}</version>
+        <configuration>
+          <skip>true</skip>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+</project>
diff --git a/docker/run_container.sh b/docker/dockerfile/cluster/resourcemanager/Dockerfile
old mode 100755
new mode 100644
similarity index 76%
copy from docker/run_container.sh
copy to docker/dockerfile/cluster/resourcemanager/Dockerfile
index 3ed32ce..b99027f
--- a/docker/run_container.sh
+++ b/docker/dockerfile/cluster/resourcemanager/Dockerfile
@@ -15,12 +15,15 @@
 # limitations under the License.
 #
 
-docker run -d \
--m 8G \
--p 7070:7070 \
--p 8088:8088 \
--p 50070:50070 \
--p 8032:8032 \
--p 8042:8042 \
--p 2181:2181 \
-apachekylin/apache-kylin-standalone:4.0.0-alpha
+ARG HADOOP_VERSION=2.8.5
+FROM apachekylin/kylin-hadoop-base:hadoop_${HADOOP_VERSION}
+
+MAINTAINER kylin
+
+EXPOSE 8088
+HEALTHCHECK CMD curl -f http://localhost:8088/ || exit 1
+
+ADD run_rm.sh /run_rm.sh
+RUN chmod a+x /run_rm.sh
+
+CMD ["/run_rm.sh"]
diff --git a/docker/run_container.sh b/docker/dockerfile/cluster/resourcemanager/run_rm.sh
old mode 100755
new mode 100644
similarity index 82%
copy from docker/run_container.sh
copy to docker/dockerfile/cluster/resourcemanager/run_rm.sh
index 3ed32ce..ed15e46
--- a/docker/run_container.sh
+++ b/docker/dockerfile/cluster/resourcemanager/run_rm.sh
@@ -1,3 +1,5 @@
+#!/bin/bash
+
 #
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with
@@ -15,12 +17,4 @@
 # limitations under the License.
 #
 
-docker run -d \
--m 8G \
--p 7070:7070 \
--p 8088:8088 \
--p 50070:50070 \
--p 8032:8032 \
--p 8042:8042 \
--p 2181:2181 \
-apachekylin/apache-kylin-standalone:4.0.0-alpha
+$HADOOP_PREFIX/bin/yarn --config $HADOOP_CONF_DIR resourcemanager
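
With the ResourceManager up, the Docker HEALTHCHECK above only hits the web UI root; the YARN REST API gives a more informative check:

    curl http://localhost:8088/ws/v1/cluster/info   # clusterInfo.state should be STARTED
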
diff --git a/docker/Dockerfile b/docker/dockerfile/standalone/Dockerfile
similarity index 100%
rename from docker/Dockerfile
rename to docker/dockerfile/standalone/Dockerfile
diff --git a/docker/conf/hadoop/core-site.xml b/docker/dockerfile/standalone/conf/hadoop/core-site.xml
similarity index 100%
rename from docker/conf/hadoop/core-site.xml
rename to docker/dockerfile/standalone/conf/hadoop/core-site.xml
diff --git a/docker/conf/hadoop/hdfs-site.xml b/docker/dockerfile/standalone/conf/hadoop/hdfs-site.xml
similarity index 100%
rename from docker/conf/hadoop/hdfs-site.xml
rename to docker/dockerfile/standalone/conf/hadoop/hdfs-site.xml
diff --git a/docker/conf/hadoop/mapred-site.xml b/docker/dockerfile/standalone/conf/hadoop/mapred-site.xml
similarity index 100%
rename from docker/conf/hadoop/mapred-site.xml
rename to docker/dockerfile/standalone/conf/hadoop/mapred-site.xml
diff --git a/docker/conf/hadoop/yarn-site.xml b/docker/dockerfile/standalone/conf/hadoop/yarn-site.xml
similarity index 100%
rename from docker/conf/hadoop/yarn-site.xml
rename to docker/dockerfile/standalone/conf/hadoop/yarn-site.xml
diff --git a/docker/conf/hive/hive-site.xml b/docker/dockerfile/standalone/conf/hive/hive-site.xml
similarity index 100%
rename from docker/conf/hive/hive-site.xml
rename to docker/dockerfile/standalone/conf/hive/hive-site.xml
diff --git a/docker/conf/maven/settings.xml b/docker/dockerfile/standalone/conf/maven/settings.xml
similarity index 100%
rename from docker/conf/maven/settings.xml
rename to docker/dockerfile/standalone/conf/maven/settings.xml
diff --git a/docker/entrypoint.sh b/docker/dockerfile/standalone/entrypoint.sh
similarity index 100%
rename from docker/entrypoint.sh
rename to docker/dockerfile/standalone/entrypoint.sh
diff --git a/docker/setup_cluster.sh b/docker/setup_cluster.sh
new file mode 100644
index 0000000..0e3a260
--- /dev/null
+++ b/docker/setup_cluster.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+# limitations under the License.
+
+SCRIPT_PATH=$(cd `dirname $0`; pwd)
+WS_ROOT=`dirname $SCRIPT_PATH`
+
+# build images for the cluster
+source ${SCRIPT_PATH}/build_cluster_images.sh
+
+# restart cluster
+KYLIN_WS=${WS_ROOT} docker-compose -f ${SCRIPT_PATH}/docker-compose/write/docker-compose-write.yml down
+sleep 10
+KYLIN_WS=${WS_ROOT} docker-compose -f ${SCRIPT_PATH}/docker-compose/write/docker-compose-write.yml up -d
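
Typical usage from the repository root, followed by a check that the write cluster came up (a sketch; the listed services depend on docker-compose-write.yml):

    ./docker/setup_cluster.sh
    docker-compose -f docker/docker-compose/write/docker-compose-write.yml ps
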
diff --git a/docker/run_container.sh b/docker/setup_standalone.sh
similarity index 100%
rename from docker/run_container.sh
rename to docker/setup_standalone.sh
diff --git a/docker/stop_cluster.sh b/docker/stop_cluster.sh
new file mode 100644
index 0000000..87f0ac4
--- /dev/null
+++ b/docker/stop_cluster.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+#  Licensed to the Apache Software Foundation (ASF) under one
+#  or more contributor license agreements.  See the NOTICE file
+#  distributed with this work for additional information
+#  regarding copyright ownership.  The ASF licenses this file
+#  to you under the Apache License, Version 2.0 (the
+#  "License"); you may not use this file except in compliance
+#  with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+# limitations under the License.
+
+SCRIPT_PATH=$(cd `dirname $0`; pwd)
+# set up root directory
+WS_ROOT=`dirname $SCRIPT_PATH`
+# shut down cluster
+KYLIN_WS=${WS_ROOT} docker-compose -f ${SCRIPT_PATH}/docker-compose/write/docker-compose-write.yml down
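
And the matching teardown:

    ./docker/stop_cluster.sh
    docker ps   # the cluster containers should be gone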