Posted to commits@linkis.apache.org by le...@apache.org on 2022/07/31 07:19:51 UTC

[incubator-linkis] branch dev-1.3.0 updated: [Feature] introduced a new image, called LDH (Linkis's hadoop all-in-one image) (#2540)

This is an automated email from the ASF dual-hosted git repository.

legendtkl pushed a commit to branch dev-1.3.0
in repository https://gitbox.apache.org/repos/asf/incubator-linkis.git


The following commit(s) were added to refs/heads/dev-1.3.0 by this push:
     new 33402e363 [Feature] introduced a new image, called LDH (Linkis's hadoop all-in-one image) (#2540)
33402e363 is described below

commit 33402e3639ab68e15076c6cedf872bd50bbd1775
Author: Aaron Lin <51...@users.noreply.github.com>
AuthorDate: Sun Jul 31 15:19:45 2022 +0800

    [Feature] introduced a new image, called LDH (Linkis's hadoop all-in-one image) (#2540)
    
    * [Feature] introduced a new image, called LDH (Linkis's hadoop all-in-one image)
    
    * [Feature] introduced a new image, called LDH (Linkis's hadoop all-in-one image)
    
    * [Feature] add some sample
    
    * [Feature] fix some syntax
    
    * [Mod] change version of hadoop components and add zookeeper
---
 README.md                                          |   2 +
 linkis-dist/docker/ldh.Dockerfile                  |  89 +++
 .../docker/{Dockerfile => linkis.Dockerfile}       |   0
 linkis-dist/docker/scripts/entry-point-ldh.sh      |  53 ++
 linkis-dist/docker/scripts/prepare-ldh-image.sh    |  82 +++
 linkis-dist/helm/README.md                         | 140 ++++-
 .../helm/scripts/{install-charts.sh => common.sh}  |  20 +-
 ...{create-test-kind.sh => create-kind-cluster.sh} |  25 +-
 linkis-dist/helm/scripts/install-charts.sh         |  16 +-
 .../scripts/{install-charts.sh => install-ldh.sh}  |  27 +-
 .../scripts/{login-pod.sh => install-mysql.sh}     |  23 +-
 linkis-dist/helm/scripts/login-pod.sh              |   0
 linkis-dist/helm/scripts/remote-debug-proxy.sh     |   0
 .../resources/ldh/configmaps/configmap-flink.yaml  | 562 ++++++++++++++++++
 .../resources/ldh/configmaps/configmap-hadoop.yaml | 659 +++++++++++++++++++++
 .../resources/ldh/configmaps/configmap-hive.yaml   | 230 +++++++
 .../resources/ldh/configmaps/configmap-spark.yaml  | 208 +++++++
 .../ldh/configmaps/configmap-zookeeper.yaml        |  98 +++
 linkis-dist/helm/scripts/resources/ldh/ldh.yaml    | 216 +++++++
 linkis-dist/pom.xml                                |  49 +-
 20 files changed, 2443 insertions(+), 56 deletions(-)

diff --git a/README.md b/README.md
index dfa59252f..f38142c46 100644
--- a/README.md
+++ b/README.md
@@ -85,6 +85,8 @@ Please go to the [Linkis Releases Page](https://github.com/apache/incubator-link
 ./mvnw clean install -Pdocker -Dmaven.javadoc.skip=true -Dmaven.test.skip=true
 # - Option 3: linkis distribution package and docker image (included web)
 ./mvnw clean install -Pdocker -Dmaven.javadoc.skip=true -Dmaven.test.skip=true -Dlinkis.build.web=true
+# - Option 4: linkis distribution package and docker image (included web and ldh (hadoop all in one for test))
+./mvnw clean install -Pdocker -Dmaven.javadoc.skip=true -Dmaven.test.skip=true -Dlinkis.build.web=true -Dlinkis.build.ldh=true
 
 ### Windows
 mvnw.cmd -N install
diff --git a/linkis-dist/docker/ldh.Dockerfile b/linkis-dist/docker/ldh.Dockerfile
new file mode 100644
index 000000000..87f164404
--- /dev/null
+++ b/linkis-dist/docker/ldh.Dockerfile
@@ -0,0 +1,89 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+######################################################################
+# hadoop all in one image
+######################################################################
+
+ARG IMAGE_BASE=centos:7
+
+FROM ${IMAGE_BASE} as linkis-ldh
+
+ARG JDK_VERSION=1.8.0-openjdk
+ARG JDK_BUILD_REVISION=1.8.0.332.b09-1.el7_9
+ARG MYSQL_JDBC_VERSION=5.1.49
+
+ARG HADOOP_VERSION=2.7.2
+ARG HIVE_VERSION=2.3.3
+ARG SPARK_VERSION=2.4.3
+ARG SPARK_HADOOP_VERSION=2.7
+ARG FLINK_VERSION=1.12.2
+ARG ZOOKEEPER_VERSION=3.5.9
+
+ARG LINKIS_VERSION=0.0.0
+
+RUN useradd -r -s /bin/bash -u 100001 -g root -G wheel hadoop
+
+RUN yum install -y \
+       vim unzip curl sudo krb5-workstation sssd crontabs net-tools python-pip \
+       java-${JDK_VERSION}-${JDK_BUILD_REVISION} \
+       java-${JDK_VERSION}-devel-${JDK_BUILD_REVISION} \
+       mysql \
+    && yum clean all
+
+RUN sed -i "s#^%wheel.*#%wheel        ALL=(ALL)       NOPASSWD: ALL#g" /etc/sudoers
+
+RUN mkdir -p /opt/ldh/${LINKIS_VERSION} \
+    && mkdir -p /opt/ldh/current \
+    && mkdir -p /data \
+    && chmod 777 -R /data
+
+ADD ldh-tars/hadoop-${HADOOP_VERSION}.tar.gz /opt/ldh/${LINKIS_VERSION}/
+ADD ldh-tars/apache-hive-${HIVE_VERSION}-bin.tar.gz /opt/ldh/${LINKIS_VERSION}/
+ADD ldh-tars/spark-${SPARK_VERSION}-bin-hadoop${SPARK_HADOOP_VERSION}.tgz /opt/ldh/${LINKIS_VERSION}/
+ADD ldh-tars/flink-${FLINK_VERSION}-bin-scala_2.11.tgz /opt/ldh/${LINKIS_VERSION}/
+ADD ldh-tars/apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz /opt/ldh/${LINKIS_VERSION}/
+
+RUN mkdir -p /etc/ldh \
+    && mkdir -p /var/log/hadoop && chmod 777 -R /var/log/hadoop \
+    && mkdir -p /var/log/hive && chmod 777 -R /var/log/hive \
+    && mkdir -p /var/log/spark && chmod 777 -R /var/log/spark \
+    && mkdir -p /var/log/flink && chmod 777 -R /var/log/flink \
+    && mkdir -p /var/log/zookeeper && chmod 777 -R /var/log/zookeeper \
+    && ln -s /opt/ldh/${LINKIS_VERSION}/hadoop-${HADOOP_VERSION} /opt/ldh/current/hadoop \
+    && ln -s /opt/ldh/${LINKIS_VERSION}/apache-hive-${HIVE_VERSION}-bin /opt/ldh/current/hive \
+    && ln -s /opt/ldh/${LINKIS_VERSION}/spark-${SPARK_VERSION}-bin-hadoop${SPARK_HADOOP_VERSION} /opt/ldh/current/spark \
+    && ln -s /opt/ldh/${LINKIS_VERSION}/flink-${FLINK_VERSION} /opt/ldh/current/flink \
+    && ln -s /opt/ldh/${LINKIS_VERSION}/apache-zookeeper-${ZOOKEEPER_VERSION}-bin /opt/ldh/current/zookeeper
+
+RUN curl -L -o /opt/ldh/current/hive/lib/mysql-connector-java-${MYSQL_JDBC_VERSION}.jar \
+            https://repo1.maven.org/maven2/mysql/mysql-connector-java/${MYSQL_JDBC_VERSION}/mysql-connector-java-${MYSQL_JDBC_VERSION}.jar \
+    && cp /opt/ldh/current/hive/lib/mysql-connector-java-${MYSQL_JDBC_VERSION}.jar /opt/ldh/current/spark/jars/
+
+ENV JAVA_HOME /etc/alternatives/jre
+ENV PATH /opt/ldh/current/hadoop/bin:/opt/ldh/current/hive/bin:/opt/ldh/current/spark/bin:/opt/ldh/current/flink/bin:/opt/ldh/current/zookeeper/bin:$PATH
+ENV HADOOP_CONF_DIR=/etc/ldh/hadoop
+ENV HIVE_CONF_DIR=/etc/ldh/hive
+ENV SPARK_CONF_DIR=/etc/ldh/spark
+ENV FLINK_CONF_DIR=/etc/ldh/flink
+ENV ZOOCFGDIR=/etc/ldh/zookeeper
+ENV ZOO_LOG_DIR=/var/log/zookeeper
+
+COPY entry-point-ldh.sh /usr/bin/start-all.sh
+RUN chmod +x /usr/bin/start-all.sh
+
+CMD ["sh", "/usr/bin/start-all.sh"]
diff --git a/linkis-dist/docker/Dockerfile b/linkis-dist/docker/linkis.Dockerfile
similarity index 100%
rename from linkis-dist/docker/Dockerfile
rename to linkis-dist/docker/linkis.Dockerfile
diff --git a/linkis-dist/docker/scripts/entry-point-ldh.sh b/linkis-dist/docker/scripts/entry-point-ldh.sh
new file mode 100644
index 000000000..252f34498
--- /dev/null
+++ b/linkis-dist/docker/scripts/entry-point-ldh.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+
+WORK_DIR=`cd $(dirname $0); pwd -P`
+
+# start all components
+# - hdfs & yarn
+hdfs namenode -format
+/opt/ldh/current/hadoop/sbin/hadoop-daemon.sh start namenode
+/opt/ldh/current/hadoop/sbin/hadoop-daemon.sh start datanode
+/opt/ldh/current/hadoop/sbin/yarn-daemon.sh start resourcemanager
+/opt/ldh/current/hadoop/sbin/yarn-daemon.sh start nodemanager
+
+# - init dirs on hdfs
+hdfs dfs -mkdir -p /tmp
+hdfs dfs -chmod -R 777 /tmp
+hdfs dfs -mkdir -p /user
+hdfs dfs -chmod -R 777 /user
+hdfs dfs -mkdir -p /spark2-history
+hdfs dfs -chmod -R 777 /spark2-history
+hdfs dfs -mkdir -p /completed-jobs
+hdfs dfs -chmod -R 777 /completed-jobs
+
+# - hive
+/opt/ldh/current/hive/bin/schematool -initSchema -dbType mysql
+/opt/ldh/current/hive/bin/hive --service metastore > /var/log/hive/metastore.out 2>&1 &
+/opt/ldh/current/hive/bin/hive --service hiveserver2 > /var/log/hive/hiveserver2.out 2>&1 &
+
+# spark
+/opt/ldh/current/spark/sbin/start-history-server.sh
+
+# flink
+/opt/ldh/current/flink/bin/yarn-session.sh --detached
+
+# zookeeper
+/opt/ldh/current/zookeeper/bin/zkServer.sh start
+
+# hold on
+while true; do sleep 3600; done
diff --git a/linkis-dist/docker/scripts/prepare-ldh-image.sh b/linkis-dist/docker/scripts/prepare-ldh-image.sh
new file mode 100755
index 000000000..65ccc6e85
--- /dev/null
+++ b/linkis-dist/docker/scripts/prepare-ldh-image.sh
@@ -0,0 +1,82 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+
+download() {
+  TAR_URL=$1
+  TAR_FILE=$2
+  HARD_LINK_ROOT=$3
+
+  if [ ! -f ${TAR_CACHE_ROOT}/${TAR_FILE} ]; then
+    echo "- downloading ${TAR_FILE} to ${TAR_CACHE_ROOT} from ${TAR_URL}"
+    curl -L ${TAR_URL} -o ${TAR_CACHE_ROOT}/${TAR_FILE}
+  else
+    echo "- ${TAR_FILE} already exists in ${TAR_CACHE_ROOT}, downloading skipped."
+  fi
+
+  echo "- create hard link: ${HARD_LINK_ROOT}/${TAR_FILE} -> ${TAR_CACHE_ROOT}/${TAR_FILE}"
+  rm -rf ${HARD_LINK_ROOT}/${TAR_FILE}
+  ln ${TAR_CACHE_ROOT}/${TAR_FILE} ${HARD_LINK_ROOT}/${TAR_FILE}
+}
+
+WORK_DIR=`cd $(dirname $0); pwd -P`
+PROJECT_ROOT=${WORK_DIR}/../..
+PROJECT_TARGET=${PROJECT_ROOT}/target
+
+TAR_CACHE_ROOT=${HOME}/.linkis-build-cache
+LDH_TAR_DIR=${PROJECT_TARGET}/ldh-tars
+
+mkdir -p ${TAR_CACHE_ROOT}
+rm -rf ${LDH_TAR_DIR} && mkdir -p ${LDH_TAR_DIR}
+
+rm -rf ${PROJECT_TARGET}/entry-point-ldh.sh
+cp ${WORK_DIR}/entry-point-ldh.sh ${PROJECT_TARGET}/
+
+MYSQL_VERSION=${MYSQL_VERSION:-5.7}
+HADOOP_VERSION=${HADOOP_VERSION:-2.7.2}
+HIVE_VERSION=${HIVE_VERSION:-2.3.3}
+SPARK_VERSION=${SPARK_VERSION:-2.4.3}
+SPARK_HADOOP_VERSION=${SPARK_HADOOP_VERSION:-2.7}
+FLINK_VERSION=${FLINK_VERSION:-1.12.2}
+ZOOKEEPER_VERSION=${ZOOKEEPER_VERSION:-3.5.9}
+
+set -e
+
+# evaluate project version
+PROJECT_VERSION=`cd ${PROJECT_ROOT} \
+   && MAVEN_OPTS="-Dorg.slf4j.simpleLogger.defaultLogLevel=WARN -Dorg.slf4j.simpleLogger.log.org.apache.maven.plugins.help=INFO" \
+   mvn help:evaluate -o -Dexpression=project.version | tail -1`
+echo "# Project version: ${PROJECT_VERSION}"
+
+echo "# Tars for hadoop component will be cached to: ${TAR_CACHE_ROOT}"
+
+TARFILENAME_HADOOP="hadoop-${HADOOP_VERSION}.tar.gz"
+TARFILENAME_HIVE="apache-hive-${HIVE_VERSION}-bin.tar.gz"
+TARFILENAME_SPARK="spark-${SPARK_VERSION}-bin-hadoop${SPARK_HADOOP_VERSION}.tgz"
+TARFILENAME_FLINK="flink-${FLINK_VERSION}-bin-scala_2.11.tgz"
+TARFILENAME_ZOOKEEPER="apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz"
+
+DOWNLOAD_URL_HADOOP="https://archive.apache.org/dist/hadoop/common/hadoop-${HADOOP_VERSION}/${TARFILENAME_HADOOP}"
+DOWNLOAD_URL_HIVE="https://archive.apache.org/dist/hive/hive-${HIVE_VERSION}/${TARFILENAME_HIVE}"
+DOWNLOAD_URL_SPARK="https://archive.apache.org/dist/spark/spark-${SPARK_VERSION}/${TARFILENAME_SPARK}"
+DOWNLOAD_URL_FLINK="https://archive.apache.org/dist/flink/flink-${FLINK_VERSION}/${TARFILENAME_FLINK}"
+DOWNLOAD_URL_ZOOKEEPER="https://archive.apache.org/dist/zookeeper/zookeeper-${ZOOKEEPER_VERSION}/${TARFILENAME_ZOOKEEPER}"
+
+download ${DOWNLOAD_URL_HADOOP} ${TARFILENAME_HADOOP} ${LDH_TAR_DIR}
+download ${DOWNLOAD_URL_HIVE} ${TARFILENAME_HIVE} ${LDH_TAR_DIR}
+download ${DOWNLOAD_URL_SPARK} ${TARFILENAME_SPARK} ${LDH_TAR_DIR}
+download ${DOWNLOAD_URL_FLINK} ${TARFILENAME_FLINK} ${LDH_TAR_DIR}
+download ${DOWNLOAD_URL_ZOOKEEPER} ${TARFILENAME_ZOOKEEPER} ${LDH_TAR_DIR}
diff --git a/linkis-dist/helm/README.md b/linkis-dist/helm/README.md
index ab97fa8d1..a198588fb 100644
--- a/linkis-dist/helm/README.md
+++ b/linkis-dist/helm/README.md
@@ -45,7 +45,8 @@ Once after you have installed KinD, you can run the following command to setup a
 # It will deploy a MySQL instance in the KinD cluster,
 # then deploy an Apache Linkis cluster, which will use 
 # the MySQL instances above 
-$> sh ./scripts/create-test-kind.sh \
+$> sh ./scripts/create-kind-cluster.sh \
+   && sh ./scripts/install-mysql.sh \
    && sh ./scripts/install-charts.sh
    
 Creating cluster "test-helm" ...
@@ -165,3 +166,140 @@ $> helm delete --namespace linkis linkis-demo
 # the helm release first
 $> kind delete cluster --name test-helm
 ```
+
+## Test with LDH 
+We introduced a new image, called LDH (Linkis's hadoop all-in-one image), which provides a pseudo-distributed Hadoop cluster for quick testing. The image contains the following Hadoop components; by default, the engines in LDH run in on-YARN mode.
+* Hadoop 2.7.2 , including HDFS and YARN
+* Hive 2.3.3
+* Spark 2.4.3
+* Flink 1.12.2
+* ZooKeeper 3.5.9
+
+> INFO: Hive in the LDH image depends on an external MySQL instance, so please deploy MySQL before deploying LDH.
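+
+As a minimal sanity check before installing LDH (assuming MySQL was deployed into the `mysql` namespace via `./scripts/install-mysql.sh`), you can wait for the MySQL pod to become ready:
+
+```shell
+# wait until the MySQL pod in the mysql namespace reports Ready
+$> kubectl wait --namespace mysql --for=condition=Ready pod --all --timeout=300s
+```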
+
+To build an LDH image, run the following Maven command at the root of the project:
+
+```shell
+$> ./mvnw clean install -Pdocker \
+   -Dmaven.javadoc.skip=true \
+   -Dmaven.test.skip=true \
+   -Dlinkis.build.web=true \
+   -Dlinkis.build.ldh=true
+```
+
+By default, we download the pre-built binary distributions of each Hadoop component from the official [Apache Archives](https://archive.apache.org/dist/) site, which can be very slow in some regions.
+Manually downloading the distributions from a faster mirror site and moving them into the directory `${HOME}/.linkis-build-cache` solves this problem.
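+
+For example, a minimal sketch of pre-populating the cache with the Hadoop tarball (the mirror URL below is only an illustration, substitute a mirror that is fast in your region; the file name must match what `prepare-ldh-image.sh` expects):
+
+```shell
+# pre-populate the local build cache so prepare-ldh-image.sh skips the slow download
+$> mkdir -p ${HOME}/.linkis-build-cache
+$> curl -L https://some-fast-mirror.example.org/hadoop/common/hadoop-2.7.2/hadoop-2.7.2.tar.gz \
+      -o ${HOME}/.linkis-build-cache/hadoop-2.7.2.tar.gz
+```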
+
+Run the following command to set up a local Kubernetes cluster with LDH on it.
+
+```shell
+# create and deploy
+$> sh ./scripts/create-kind-cluster.sh \
+   && sh ./scripts/install-mysql.sh \
+   && sh ./scripts/install-ldh.sh \
+   && sh ./scripts/install-charts.sh
+   
+...
+
+# take a try
+$> kubectl exec -it -n ldh $(kubectl get pod -n ldh -o jsonpath='{.items[0].metadata.name}') -- bash
+
+[root@ldh-96bdc757c-dnkbs /]# hdfs dfs -ls /
+Found 4 items
+drwxrwxrwx   - root supergroup          0 2022-07-31 02:48 /completed-jobs
+drwxrwxrwx   - root supergroup          0 2022-07-31 02:48 /spark2-history
+drwxrwxrwx   - root supergroup          0 2022-07-31 02:49 /tmp
+drwxrwxrwx   - root supergroup          0 2022-07-31 02:48 /user
+
+[root@ldh-96bdc757c-dnkbs /]# beeline -u jdbc:hive2://ldh.ldh.svc.cluster.local:10000/ -n hadoop
+Connecting to jdbc:hive2://ldh.ldh.svc.cluster.local:10000/
+Connected to: Apache Hive (version 2.3.3)
+Driver: Hive JDBC (version 2.3.3)
+Transaction isolation: TRANSACTION_REPEATABLE_READ
+Beeline version 2.3.3 by Apache Hive
+0: jdbc:hive2://ldh.ldh.svc.cluster.local:100> create database demo;
+No rows affected (1.306 seconds)
+0: jdbc:hive2://ldh.ldh.svc.cluster.local:100> use demo;
+No rows affected (0.046 seconds)
+0: jdbc:hive2://ldh.ldh.svc.cluster.local:100> create table t1 (id int, data string);
+No rows affected (0.709 seconds)
+0: jdbc:hive2://ldh.ldh.svc.cluster.local:100> insert into t1 values(1, 'linikis demo');
+WARNING: Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
+No rows affected (5.491 seconds)
+0: jdbc:hive2://ldh.ldh.svc.cluster.local:100> select * from t1;
++--------+---------------+
+| t1.id  |    t1.data    |
++--------+---------------+
+| 1      | linikis demo  |
++--------+---------------+
+1 row selected (0.39 seconds)
+0: jdbc:hive2://ldh.ldh.svc.cluster.local:100> !q
+
+[root@ldh-96bdc757c-dnkbs /]# spark-sql
+22/07/31 02:53:18 INFO hive.metastore: Trying to connect to metastore with URI thrift://ldh.ldh.svc.cluster.local:9083
+22/07/31 02:53:18 INFO hive.metastore: Connected to metastore.
+...
+22/07/31 02:53:19 INFO spark.SparkContext: Running Spark version 2.4.3
+22/07/31 02:53:19 INFO spark.SparkContext: Submitted application: SparkSQL::10.244.0.6
+...
+22/07/31 02:53:27 INFO yarn.Client: Submitting application application_1659235712576_0001 to ResourceManager
+22/07/31 02:53:27 INFO impl.YarnClientImpl: Submitted application application_1659235712576_0001
+22/07/31 02:53:27 INFO cluster.SchedulerExtensionServices: Starting Yarn extension services with app application_1659235712576_0001 and attemptId None
+22/07/31 02:53:28 INFO yarn.Client: Application report for application_1659235712576_0001 (state: ACCEPTED)
+...
+22/07/31 02:53:36 INFO yarn.Client: Application report for application_1659235712576_0001 (state: RUNNING)
+...
+Spark master: yarn, Application Id: application_1659235712576_0001
+22/07/31 02:53:46 INFO thriftserver.SparkSQLCLIDriver: Spark master: yarn, Application Id: application_1659235712576_0001
+spark-sql> use demo;
+Time taken: 0.074 seconds
+22/07/31 02:58:02 INFO thriftserver.SparkSQLCLIDriver: Time taken: 0.074 seconds
+spark-sql> select * from t1;
+...
+1       linikis demo
+2       linkis demo spark sql
+Time taken: 3.352 seconds, Fetched 2 row(s)
+spark-sql> quit;
+
+[root@ldh-96bdc757c-dnkbs /]# zkCli.sh
+Connecting to localhost:2181
+Welcome to ZooKeeper!
+JLine support is enabled
+WATCHER::
+
+WatchedEvent state:SyncConnected type:None path:null
+
+[zk: localhost:2181(CONNECTED) 0] get -s /zookeeper/quota
+
+cZxid = 0x0
+ctime = Thu Jan 01 00:00:00 UTC 1970
+mZxid = 0x0
+mtime = Thu Jan 01 00:00:00 UTC 1970
+pZxid = 0x0
+cversion = 0
+dataVersion = 0
+aclVersion = 0
+ephemeralOwner = 0x0
+dataLength = 0
+numChildren = 0
+[zk: localhost:2181(CONNECTED) 1] quit
+
+[root@ldh-96bdc757c-dnkbs /]# flink run /opt/ldh/current/flink/examples/streaming/TopSpeedWindowing.jar
+Executing TopSpeedWindowing example with default input data set.
+Use --input to specify file input.
+Printing result to stdout. Use --output to specify output path.
+...
+```
+
+You can access the LDH services inside the Kubernetes cluster via the endpoint `ldh.ldh.svc.cluster.local`; for example, to access HDFS from your own pod:
+
+```shell
+[root@sample-pod /]# hdfs dfs -ls hdfs://ldh.ldh.svc.cluster.local:9000/
+Found 4 items
+drwxrwxrwx   - root supergroup          0 2022-07-28 04:58 hdfs://ldh.ldh.svc.cluster.local:9000/completed-jobs
+drwxrwxrwx   - root supergroup          0 2022-07-28 05:22 hdfs://ldh.ldh.svc.cluster.local:9000/spark2-history
+drwxrwxrwx   - root supergroup          0 2022-07-28 04:58 hdfs://ldh.ldh.svc.cluster.local:9000/tmp
+drwxr-xr-x   - root supergroup          0 2022-07-28 05:20 hdfs://ldh.ldh.svc.cluster.local:9000/user
+```
+
+Finally, you can access the web UIs with `kubectl port-forward`.
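+
+For example, a minimal sketch for the YARN ResourceManager UI (8088 is the default Hadoop 2.x ResourceManager web port; adjust the pod selector and port if your setup differs):
+
+```shell
+# forward local port 8088 to the ResourceManager web ui inside the LDH pod
+$> kubectl port-forward -n ldh $(kubectl get pod -n ldh -o jsonpath='{.items[0].metadata.name}') 8088:8088
+# then open http://localhost:8088 in your browser
+```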
diff --git a/linkis-dist/helm/scripts/install-charts.sh b/linkis-dist/helm/scripts/common.sh
old mode 100644
new mode 100755
similarity index 66%
copy from linkis-dist/helm/scripts/install-charts.sh
copy to linkis-dist/helm/scripts/common.sh
index 465f89903..3619d29f1
--- a/linkis-dist/helm/scripts/install-charts.sh
+++ b/linkis-dist/helm/scripts/common.sh
@@ -16,16 +16,18 @@
 #
 
 WORK_DIR=`cd $(dirname $0); pwd -P`
+
+PROJECT_ROOT=${WORK_DIR}/../..
+RESOURCE_DIR=${WORK_DIR}/resources
 CHARTS_DIR_ROOT=${WORK_DIR}/../charts
 LINKIS_CHART_DIR=${CHARTS_DIR_ROOT}/linkis
 
-KUBE_NAMESPACE=${1:-linkis}
-HELM_RELEASE_NAME=${2:-linkis-demo}
+KIND_LOAD_IMAGE=${KIND_LOAD_IMAGE:-true}
+KIND_CLUSTER_NAME=${KIND_CLUSTER_NAME:-test-helm}
+
+# evaluate project version
+PROJECT_VERSION=`cd ${PROJECT_ROOT} \
+   && MAVEN_OPTS="-Dorg.slf4j.simpleLogger.defaultLogLevel=WARN -Dorg.slf4j.simpleLogger.log.org.apache.maven.plugins.help=INFO" \
+   mvn help:evaluate -o -Dexpression=project.version | tail -1`
 
-if [ "X${HELM_DEBUG}" == "Xtrue" ]; then
-  # template helm charts
-  helm template --namespace ${KUBE_NAMESPACE} -f ${LINKIS_CHART_DIR}/values.yaml ${HELM_RELEASE_NAME} ${LINKIS_CHART_DIR}
-else
-  # install helm charts
-  helm install --create-namespace --namespace ${KUBE_NAMESPACE} -f ${LINKIS_CHART_DIR}/values.yaml ${HELM_RELEASE_NAME} ${LINKIS_CHART_DIR}
-fi
+echo "# Project version: ${PROJECT_VERSION}"
diff --git a/linkis-dist/helm/scripts/create-test-kind.sh b/linkis-dist/helm/scripts/create-kind-cluster.sh
old mode 100644
new mode 100755
similarity index 62%
rename from linkis-dist/helm/scripts/create-test-kind.sh
rename to linkis-dist/helm/scripts/create-kind-cluster.sh
index e0bdd9717..bca2ac899
--- a/linkis-dist/helm/scripts/create-test-kind.sh
+++ b/linkis-dist/helm/scripts/create-kind-cluster.sh
@@ -16,20 +16,12 @@
 #
 
 WORK_DIR=`cd $(dirname $0); pwd -P`
-PROJECT_ROOT=${WORK_DIR}/../..
-RESOURCE_DIR=${WORK_DIR}/resources
-TMP_DIR=`mktemp -d -t kind-XXXXX`
 
-set -e
+. ${WORK_DIR}/common.sh
 
-KIND_CLUSTER_NAME=${KIND_CLUSTER_NAME:-test-helm}
-MYSQL_VERSION=${MYSQL_VERSION:-5.7}
+TMP_DIR=`mktemp -d -t kind-XXXXX`
 
-# evaluate project version
-PROJECT_VERSION=`cd ${PROJECT_ROOT} \
-   && MAVEN_OPTS="-Dorg.slf4j.simpleLogger.defaultLogLevel=WARN -Dorg.slf4j.simpleLogger.log.org.apache.maven.plugins.help=INFO" \
-   mvn help:evaluate -o -Dexpression=project.version | tail -1`
-echo "# Project version: ${PROJECT_VERSION}"
+set -e
 
 # create kind cluster
 echo "# Creating KinD cluster ..."
@@ -44,14 +36,3 @@ KIND_CLUSTER_HOST_PATH=${KIND_CLUSTER_HOST_PATH} envsubst < ${KIND_CLUSTER_CONF_
 echo "- kind cluster config: ${KIND_CLUSTER_CONF_FILE}"
 cat ${KIND_CLUSTER_CONF_FILE}
 kind create cluster --name ${KIND_CLUSTER_NAME} --config ${KIND_CLUSTER_CONF_FILE}
-
-# load images
-echo "# Loading images into KinD cluster ..."
-kind load docker-image linkis:${PROJECT_VERSION} --name ${KIND_CLUSTER_NAME}
-kind load docker-image linkis-web:${PROJECT_VERSION} --name ${KIND_CLUSTER_NAME}
-kind load docker-image mysql:${MYSQL_VERSION} --name ${KIND_CLUSTER_NAME}
-
-# deploy mysql
-echo "# Deploying MySQL ..."
-kubectl create ns mysql
-kubectl apply -n mysql -f ${RESOURCE_DIR}/mysql.yaml
diff --git a/linkis-dist/helm/scripts/install-charts.sh b/linkis-dist/helm/scripts/install-charts.sh
old mode 100644
new mode 100755
index 465f89903..6da003e3b
--- a/linkis-dist/helm/scripts/install-charts.sh
+++ b/linkis-dist/helm/scripts/install-charts.sh
@@ -16,16 +16,28 @@
 #
 
 WORK_DIR=`cd $(dirname $0); pwd -P`
-CHARTS_DIR_ROOT=${WORK_DIR}/../charts
-LINKIS_CHART_DIR=${CHARTS_DIR_ROOT}/linkis
+
+. ${WORK_DIR}/common.sh
 
 KUBE_NAMESPACE=${1:-linkis}
 HELM_RELEASE_NAME=${2:-linkis-demo}
 
+. ${WORK_DIR}/common.sh
+
 if [ "X${HELM_DEBUG}" == "Xtrue" ]; then
   # template helm charts
   helm template --namespace ${KUBE_NAMESPACE} -f ${LINKIS_CHART_DIR}/values.yaml ${HELM_RELEASE_NAME} ${LINKIS_CHART_DIR}
 else
+  # create hadoop configs
+  if [ "X${WITH_LDH}" == "Xtrue" ]; then
+    kubectl apply -n ${KUBE_NAMESPACE} -f ${RESOURCE_DIR}/ldh/configmaps
+  fi
+  # load image
+  if [ "X${KIND_LOAD_IMAGE}" == "Xtrue" ]; then
+    echo "# Loading Linkis image ..."
+    kind load docker-image linkis:${PROJECT_VERSION} --name ${KIND_CLUSTER_NAME}
+    kind load docker-image linkis-web:${PROJECT_VERSION} --name ${KIND_CLUSTER_NAME}
+  fi
   # install helm charts
   helm install --create-namespace --namespace ${KUBE_NAMESPACE} -f ${LINKIS_CHART_DIR}/values.yaml ${HELM_RELEASE_NAME} ${LINKIS_CHART_DIR}
 fi
diff --git a/linkis-dist/helm/scripts/install-charts.sh b/linkis-dist/helm/scripts/install-ldh.sh
old mode 100644
new mode 100755
similarity index 62%
copy from linkis-dist/helm/scripts/install-charts.sh
copy to linkis-dist/helm/scripts/install-ldh.sh
index 465f89903..6db703f9b
--- a/linkis-dist/helm/scripts/install-charts.sh
+++ b/linkis-dist/helm/scripts/install-ldh.sh
@@ -16,16 +16,23 @@
 #
 
 WORK_DIR=`cd $(dirname $0); pwd -P`
-CHARTS_DIR_ROOT=${WORK_DIR}/../charts
-LINKIS_CHART_DIR=${CHARTS_DIR_ROOT}/linkis
 
-KUBE_NAMESPACE=${1:-linkis}
-HELM_RELEASE_NAME=${2:-linkis-demo}
+. ${WORK_DIR}/common.sh
 
-if [ "X${HELM_DEBUG}" == "Xtrue" ]; then
-  # template helm charts
-  helm template --namespace ${KUBE_NAMESPACE} -f ${LINKIS_CHART_DIR}/values.yaml ${HELM_RELEASE_NAME} ${LINKIS_CHART_DIR}
-else
-  # install helm charts
-  helm install --create-namespace --namespace ${KUBE_NAMESPACE} -f ${LINKIS_CHART_DIR}/values.yaml ${HELM_RELEASE_NAME} ${LINKIS_CHART_DIR}
+set -e
+
+LDH_VERSION=${LDH_VERSION-${PROJECT_VERSION}}
+echo "# LDH version: ${LDH_VERSION}"
+
+# load image
+if [ "X${KIND_LOAD_IMAGE}" == "Xtrue" ]; then
+  echo "# Loading LDH image ..."
+  kind load docker-image linkis-ldh:${PROJECT_VERSION} --name ${KIND_CLUSTER_NAME}
 fi
+
+# deploy LDH
+echo "# Deploying LDH ..."
+kubectl create ns ldh
+kubectl apply -n ldh -f ${RESOURCE_DIR}/ldh/configmaps
+
+LDH_VERSION=${LDH_VERSION} envsubst < ${RESOURCE_DIR}/ldh/ldh.yaml | kubectl apply -n ldh -f -
diff --git a/linkis-dist/helm/scripts/login-pod.sh b/linkis-dist/helm/scripts/install-mysql.sh
old mode 100644
new mode 100755
similarity index 66%
copy from linkis-dist/helm/scripts/login-pod.sh
copy to linkis-dist/helm/scripts/install-mysql.sh
index 1bb391928..208d09407
--- a/linkis-dist/helm/scripts/login-pod.sh
+++ b/linkis-dist/helm/scripts/install-mysql.sh
@@ -17,16 +17,19 @@
 
 WORK_DIR=`cd $(dirname $0); pwd -P`
 
-COMPONENT_NAME=$1
+. ${WORK_DIR}/common.sh
 
-LINKIS_KUBE_NAMESPACE=linkis
-LINKIS_INSTANCE_NAME=linkis-demo
+MYSQL_VERSION=${MYSQL_VERSION:-5.7}
 
-login() {
-  component_name=$1
-  echo "- login [${component_name}]'s bash ..."
-  POD_NAME=`kubectl get pods -n ${LINKIS_KUBE_NAMESPACE} -l app.kubernetes.io/instance=${LINKIS_INSTANCE_NAME}-${component_name} -o jsonpath='{.items[0].metadata.name}'`
-  kubectl exec -it -n ${LINKIS_KUBE_NAMESPACE} ${POD_NAME} -- bash
-}
+set -e
 
-login ${COMPONENT_NAME}
+# load image
+if [ "X${KIND_LOAD_IMAGE}" == "Xtrue" ]; then
+  echo "# Loading MySQL image ..."
+  kind load docker-image mysql:${MYSQL_VERSION} --name ${KIND_CLUSTER_NAME}
+fi
+
+# deploy mysql
+echo "# Deploying MySQL ..."
+kubectl create ns mysql
+kubectl apply -n mysql -f ${RESOURCE_DIR}/mysql.yaml
diff --git a/linkis-dist/helm/scripts/login-pod.sh b/linkis-dist/helm/scripts/login-pod.sh
old mode 100644
new mode 100755
diff --git a/linkis-dist/helm/scripts/remote-debug-proxy.sh b/linkis-dist/helm/scripts/remote-debug-proxy.sh
old mode 100644
new mode 100755
diff --git a/linkis-dist/helm/scripts/resources/ldh/configmaps/configmap-flink.yaml b/linkis-dist/helm/scripts/resources/ldh/configmaps/configmap-flink.yaml
new file mode 100644
index 000000000..86d169d0e
--- /dev/null
+++ b/linkis-dist/helm/scripts/resources/ldh/configmaps/configmap-flink.yaml
@@ -0,0 +1,562 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: flink-conf
+data:
+  flink-conf.yaml: |
+    #==============================================================================
+    # Common
+    #==============================================================================
+
+    # The external address of the host on which the JobManager runs and can be
+    # reached by the TaskManagers and any clients which want to connect. This setting
+    # is only used in Standalone mode and may be overwritten on the JobManager side
+    # by specifying the --host <hostname> parameter of the bin/jobmanager.sh executable.
+    # In high availability mode, if you use the bin/start-cluster.sh script and setup
+    # the conf/masters file, this will be taken care of automatically. Yarn
+    # automatically configure the host name based on the hostname of the node where the
+    # JobManager runs.
+
+    jobmanager.rpc.address: localhost
+
+    # The RPC port where the JobManager is reachable.
+
+    jobmanager.rpc.port: 6123
+
+
+    # The total process memory size for the JobManager.
+    #
+    # Note this accounts for all memory usage within the JobManager process, including JVM metaspace and other overhead.
+
+    jobmanager.memory.process.size: 1600m
+
+
+    # The total process memory size for the TaskManager.
+    #
+    # Note this accounts for all memory usage within the TaskManager process, including JVM metaspace and other overhead.
+
+    taskmanager.memory.process.size: 1728m
+
+    # To exclude JVM metaspace and overhead, please, use total Flink memory size instead of 'taskmanager.memory.process.size'.
+    # It is not recommended to set both 'taskmanager.memory.process.size' and Flink memory.
+    #
+    # taskmanager.memory.flink.size: 1280m
+
+    # The number of task slots that each TaskManager offers. Each slot runs one parallel pipeline.
+
+    taskmanager.numberOfTaskSlots: 1
+
+    # The parallelism used for programs that did not specify and other parallelism.
+
+    parallelism.default: 1
+
+    # The default file system scheme and authority.
+    #
+    # By default file paths without scheme are interpreted relative to the local
+    # root file system 'file:///'. Use this to override the default and interpret
+    # relative paths relative to a different file system,
+    # for example 'hdfs://mynamenode:12345'
+    #
+    # fs.default-scheme
+
+    env.log.dir: /var/log/flink
+
+    #==============================================================================
+    # High Availability
+    #==============================================================================
+
+    # The high-availability mode. Possible options are 'NONE' or 'zookeeper'.
+    #
+    # high-availability: zookeeper
+
+    # The path where metadata for master recovery is persisted. While ZooKeeper stores
+    # the small ground truth for checkpoint and leader election, this location stores
+    # the larger objects, like persisted dataflow graphs.
+    #
+    # Must be a durable file system that is accessible from all nodes
+    # (like HDFS, S3, Ceph, nfs, ...)
+    #
+    # high-availability.storageDir: hdfs:///flink/ha/
+
+    # The list of ZooKeeper quorum peers that coordinate the high-availability
+    # setup. This must be a list of the form:
+    # "host1:clientPort,host2:clientPort,..." (default clientPort: 2181)
+    #
+    # high-availability.zookeeper.quorum: localhost:2181
+
+
+    # ACL options are based on https://zookeeper.apache.org/doc/r3.1.2/zookeeperProgrammers.html#sc_BuiltinACLSchemes
+    # It can be either "creator" (ZOO_CREATE_ALL_ACL) or "open" (ZOO_OPEN_ACL_UNSAFE)
+    # The default value is "open" and it can be changed to "creator" if ZK security is enabled
+    #
+    # high-availability.zookeeper.client.acl: open
+
+    #==============================================================================
+    # Fault tolerance and checkpointing
+    #==============================================================================
+
+    # The backend that will be used to store operator state checkpoints if
+    # checkpointing is enabled. Checkpointing is enabled when execution.checkpointing.interval > 0.
+    #
+    # Execution checkpointing related parameters. Please refer to CheckpointConfig and ExecutionCheckpointingOptions for more details.
+    #
+    # execution.checkpointing.interval: 3min
+    # execution.checkpointing.externalized-checkpoint-retention: [DELETE_ON_CANCELLATION, RETAIN_ON_CANCELLATION]
+    # execution.checkpointing.max-concurrent-checkpoints: 1
+    # execution.checkpointing.min-pause: 0
+    # execution.checkpointing.mode: [EXACTLY_ONCE, AT_LEAST_ONCE]
+    # execution.checkpointing.timeout: 10min
+    # execution.checkpointing.tolerable-failed-checkpoints: 0
+    # execution.checkpointing.unaligned: false
+    #
+    # Supported backends are 'jobmanager', 'filesystem', 'rocksdb', or the
+    # <class-name-of-factory>.
+    #
+    # state.backend: filesystem
+
+    # Directory for checkpoints filesystem, when using any of the default bundled
+    # state backends.
+    #
+    # state.checkpoints.dir: hdfs://namenode-host:port/flink-checkpoints
+
+    # Default target directory for savepoints, optional.
+    #
+    # state.savepoints.dir: hdfs://namenode-host:port/flink-savepoints
+
+    # Flag to enable/disable incremental checkpoints for backends that
+    # support incremental checkpoints (like the RocksDB state backend).
+    #
+    # state.backend.incremental: false
+
+    # The failover strategy, i.e., how the job computation recovers from task failures.
+    # Only restart tasks that may have been affected by the task failure, which typically includes
+    # downstream tasks and potentially upstream tasks if their produced data is no longer available for consumption.
+
+    jobmanager.execution.failover-strategy: region
+
+    #==============================================================================
+    # Rest & web frontend
+    #==============================================================================
+
+    # The port to which the REST client connects to. If rest.bind-port has
+    # not been specified, then the server will bind to this port as well.
+    #
+    rest.port: 8090
+
+    # The address to which the REST client will connect to
+    #
+    #rest.address: 0.0.0.0
+
+    # Port range for the REST and web server to bind to.
+    #
+    #rest.bind-port: 8090-8100
+
+    # The address that the REST & web server binds to
+    #
+    #rest.bind-address: 0.0.0.0
+
+    # Flag to specify whether job submission is enabled from the web-based
+    # runtime monitor. Uncomment to disable.
+
+    #web.submit.enable: false
+
+    # Flag to specify whether job cancellation is enabled from the web-based
+    # runtime monitor. Uncomment to disable.
+
+    #web.cancel.enable: false
+
+    #==============================================================================
+    # Advanced
+    #==============================================================================
+
+    # Override the directories for temporary files. If not specified, the
+    # system-specific Java temporary directory (java.io.tmpdir property) is taken.
+    #
+    # For framework setups on Yarn, Flink will automatically pick up the
+    # containers' temp directories without any need for configuration.
+    #
+    # Add a delimited list for multiple directories, using the system directory
+    # delimiter (colon ':' on unix) or a comma, e.g.:
+    #     /data1/tmp:/data2/tmp:/data3/tmp
+    #
+    # Note: Each directory entry is read from and written to by a different I/O
+    # thread. You can include the same directory multiple times in order to create
+    # multiple I/O threads against that directory. This is for example relevant for
+    # high-throughput RAIDs.
+    #
+    # io.tmp.dirs: /tmp
+
+    # The classloading resolve order. Possible values are 'child-first' (Flink's default)
+    # and 'parent-first' (Java's default).
+    #
+    # Child first classloading allows users to use different dependency/library
+    # versions in their application than those in the classpath. Switching back
+    # to 'parent-first' may help with debugging dependency issues.
+    #
+    # classloader.resolve-order: child-first
+
+    # The amount of memory going to the network stack. These numbers usually need
+    # no tuning. Adjusting them may be necessary in case of an "Insufficient number
+    # of network buffers" error. The default min is 64MB, the default max is 1GB.
+    #
+    # taskmanager.memory.network.fraction: 0.1
+    # taskmanager.memory.network.min: 64mb
+    # taskmanager.memory.network.max: 1gb
+
+    #==============================================================================
+    # Flink Cluster Security Configuration
+    #==============================================================================
+
+    # Kerberos authentication for various components - Hadoop, ZooKeeper, and connectors -
+    # may be enabled in four steps:
+    # 1. configure the local krb5.conf file
+    # 2. provide Kerberos credentials (either a keytab or a ticket cache w/ kinit)
+    # 3. make the credentials available to various JAAS login contexts
+    # 4. configure the connector to use JAAS/SASL
+
+    # The below configure how Kerberos credentials are provided. A keytab will be used instead of
+    # a ticket cache if the keytab path and principal are set.
+
+    # security.kerberos.login.use-ticket-cache: true
+    # security.kerberos.login.keytab: /path/to/kerberos/keytab
+    # security.kerberos.login.principal: flink-user
+
+    # The configuration below defines which JAAS login contexts
+
+    # security.kerberos.login.contexts: Client,KafkaClient
+
+    #==============================================================================
+    # ZK Security Configuration
+    #==============================================================================
+
+    # Below configurations are applicable if ZK ensemble is configured for security
+
+    # Override below configuration to provide custom ZK service name if configured
+    # zookeeper.sasl.service-name: zookeeper
+
+    # The configuration below must match one of the values set in "security.kerberos.login.contexts"
+    # zookeeper.sasl.login-context-name: Client
+
+    #==============================================================================
+    # HistoryServer
+    #==============================================================================
+
+    # The HistoryServer is started and stopped via bin/historyserver.sh (start|stop)
+
+    # Directory to upload completed jobs to. Add this directory to the list of
+    # monitored directories of the HistoryServer as well (see below).
+    #jobmanager.archive.fs.dir: hdfs:///completed-jobs/
+
+    # The address under which the web-based HistoryServer listens.
+    #historyserver.web.address: 0.0.0.0
+
+    # The port under which the web-based HistoryServer listens.
+    #historyserver.web.port: 8082
+
+    # Comma separated list of directories to monitor for completed jobs.
+    #historyserver.archive.fs.dir: hdfs:///completed-jobs/
+
+    # Interval in milliseconds for refreshing the monitored directories.
+    #historyserver.archive.fs.refresh-interval: 10000
+
+  log4j.properties: |
+    # Allows this configuration to be modified at runtime. The file will be checked every 30 seconds.
+    monitorInterval=30
+
+    # This affects logging for both user code and Flink
+    rootLogger.level = INFO
+    rootLogger.appenderRef.file.ref = MainAppender
+
+    # Uncomment this if you want to _only_ change Flink's logging
+    #logger.flink.name = org.apache.flink
+    #logger.flink.level = INFO
+
+    # The following lines keep the log level of common libraries/connectors on
+    # log level INFO. The root logger does not override this. You have to manually
+    # change the log levels here.
+    logger.akka.name = akka
+    logger.akka.level = INFO
+    logger.kafka.name= org.apache.kafka
+    logger.kafka.level = INFO
+    logger.hadoop.name = org.apache.hadoop
+    logger.hadoop.level = INFO
+    logger.zookeeper.name = org.apache.zookeeper
+    logger.zookeeper.level = INFO
+    logger.shaded_zookeeper.name = org.apache.flink.shaded.zookeeper3
+    logger.shaded_zookeeper.level = INFO
+
+    # Log all infos in the given file
+    appender.main.name = MainAppender
+    appender.main.type = RollingFile
+    appender.main.append = true
+    appender.main.fileName = ${sys:log.file}
+    appender.main.filePattern = ${sys:log.file}.%i
+    appender.main.layout.type = PatternLayout
+    appender.main.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
+    appender.main.policies.type = Policies
+    appender.main.policies.size.type = SizeBasedTriggeringPolicy
+    appender.main.policies.size.size = 100MB
+    appender.main.policies.startup.type = OnStartupTriggeringPolicy
+    appender.main.strategy.type = DefaultRolloverStrategy
+    appender.main.strategy.max = ${env:MAX_LOG_FILE_NUMBER:-10}
+
+    # Suppress the irrelevant (wrong) warnings from the Netty channel handler
+    logger.netty.name = org.jboss.netty.channel.DefaultChannelPipeline
+    logger.netty.level = OFF
+
+  log4j-cli.properties: |
+    # Allows this configuration to be modified at runtime. The file will be checked every 30 seconds.
+    monitorInterval=30
+
+    rootLogger.level = INFO
+    rootLogger.appenderRef.file.ref = FileAppender
+
+    # Log all infos in the given file
+    appender.file.name = FileAppender
+    appender.file.type = FILE
+    appender.file.append = false
+    appender.file.fileName = ${sys:log.file}
+    appender.file.layout.type = PatternLayout
+    appender.file.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
+
+    # Log output from org.apache.flink.yarn to the console. This is used by the
+    # CliFrontend class when using a per-job YARN cluster.
+    logger.yarn.name = org.apache.flink.yarn
+    logger.yarn.level = INFO
+    logger.yarn.appenderRef.console.ref = ConsoleAppender
+    logger.yarncli.name = org.apache.flink.yarn.cli.FlinkYarnSessionCli
+    logger.yarncli.level = INFO
+    logger.yarncli.appenderRef.console.ref = ConsoleAppender
+    logger.hadoop.name = org.apache.hadoop
+    logger.hadoop.level = INFO
+    logger.hadoop.appenderRef.console.ref = ConsoleAppender
+
+    # Make sure hive logs go to the file.
+    logger.hive.name = org.apache.hadoop.hive
+    logger.hive.level = INFO
+    logger.hive.additivity = false
+    logger.hive.appenderRef.file.ref = FileAppender
+
+    # Log output from org.apache.flink.kubernetes to the console.
+    logger.kubernetes.name = org.apache.flink.kubernetes
+    logger.kubernetes.level = INFO
+    logger.kubernetes.appenderRef.console.ref = ConsoleAppender
+
+    appender.console.name = ConsoleAppender
+    appender.console.type = CONSOLE
+    appender.console.layout.type = PatternLayout
+    appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
+
+    # suppress the warning that hadoop native libraries are not loaded (irrelevant for the client)
+    logger.hadoopnative.name = org.apache.hadoop.util.NativeCodeLoader
+    logger.hadoopnative.level = OFF
+
+    # Suppress the irrelevant (wrong) warnings from the Netty channel handler
+    logger.netty.name = org.jboss.netty.channel.DefaultChannelPipeline
+    logger.netty.level = OFF
+
+  log4j-console.properties: |
+    # Allows this configuration to be modified at runtime. The file will be checked every 30 seconds.
+    monitorInterval=30
+
+    # This affects logging for both user code and Flink
+    rootLogger.level = INFO
+    rootLogger.appenderRef.console.ref = ConsoleAppender
+    rootLogger.appenderRef.rolling.ref = RollingFileAppender
+
+    # Uncomment this if you want to _only_ change Flink's logging
+    #logger.flink.name = org.apache.flink
+    #logger.flink.level = INFO
+
+    # The following lines keep the log level of common libraries/connectors on
+    # log level INFO. The root logger does not override this. You have to manually
+    # change the log levels here.
+    logger.akka.name = akka
+    logger.akka.level = INFO
+    logger.kafka.name= org.apache.kafka
+    logger.kafka.level = INFO
+    logger.hadoop.name = org.apache.hadoop
+    logger.hadoop.level = INFO
+    logger.zookeeper.name = org.apache.zookeeper
+    logger.zookeeper.level = INFO
+    logger.shaded_zookeeper.name = org.apache.flink.shaded.zookeeper3
+    logger.shaded_zookeeper.level = INFO
+
+    # Log all infos to the console
+    appender.console.name = ConsoleAppender
+    appender.console.type = CONSOLE
+    appender.console.layout.type = PatternLayout
+    appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
+
+    # Log all infos in the given rolling file
+    appender.rolling.name = RollingFileAppender
+    appender.rolling.type = RollingFile
+    appender.rolling.append = true
+    appender.rolling.fileName = ${sys:log.file}
+    appender.rolling.filePattern = ${sys:log.file}.%i
+    appender.rolling.layout.type = PatternLayout
+    appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
+    appender.rolling.policies.type = Policies
+    appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
+    appender.rolling.policies.size.size=100MB
+    appender.rolling.policies.startup.type = OnStartupTriggeringPolicy
+    appender.rolling.strategy.type = DefaultRolloverStrategy
+    appender.rolling.strategy.max = ${env:MAX_LOG_FILE_NUMBER:-10}
+
+    # Suppress the irrelevant (wrong) warnings from the Netty channel handler
+    logger.netty.name = org.jboss.netty.channel.DefaultChannelPipeline
+    logger.netty.level = OFF
+
+  log4j-session.properties: |
+    # Allows this configuration to be modified at runtime. The file will be checked every 30 seconds.
+    monitorInterval=30
+
+    rootLogger.level = INFO
+    rootLogger.appenderRef.console.ref = ConsoleAppender
+
+    appender.console.name = ConsoleAppender
+    appender.console.type = CONSOLE
+    appender.console.layout.type = PatternLayout
+    appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
+
+    # Suppress the irrelevant (wrong) warnings from the Netty channel handler
+    logger.netty.name = org.jboss.netty.channel.DefaultChannelPipeline
+    logger.netty.level = OFF
+    logger.zookeeper.name = org.apache.zookeeper
+    logger.zookeeper.level = WARN
+    logger.shaded_zookeeper.name = org.apache.flink.shaded.zookeeper3
+    logger.shaded_zookeeper.level = WARN
+    logger.curator.name = org.apache.flink.shaded.org.apache.curator.framework
+    logger.curator.level = WARN
+    logger.runtimeutils.name= org.apache.flink.runtime.util.ZooKeeperUtils
+    logger.runtimeutils.level = WARN
+    logger.runtimeleader.name = org.apache.flink.runtime.leaderretrieval.ZooKeeperLeaderRetrievalDriver
+    logger.runtimeleader.level = WARN
+
+  logback-session.xml: |
+    <configuration>
+        <appender name="file" class="ch.qos.logback.core.FileAppender">
+            <file>${log.file}</file>
+            <append>false</append>
+            <encoder>
+                <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{60} %X{sourceThread} - %msg%n</pattern>
+            </encoder>
+        </appender>
+
+        <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
+            <encoder>
+                <pattern>%d{yyyy-MM-dd HH:mm:ss} %-5level %logger{60} %X{sourceThread} - %msg%n</pattern>
+            </encoder>
+        </appender>
+
+        <logger name="ch.qos.logback" level="WARN" />
+        <root level="INFO">
+            <appender-ref ref="file"/>
+            <appender-ref ref="console"/>
+        </root>
+    </configuration>
+
+  logback-console.xml: |
+    <configuration>
+        <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
+            <encoder>
+                <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{60} %X{sourceThread} - %msg%n</pattern>
+            </encoder>
+        </appender>
+
+        <appender name="rolling" class="ch.qos.logback.core.rolling.RollingFileAppender">
+            <file>${log.file}</file>
+            <append>false</append>
+
+            <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+                <fileNamePattern>${log.file}.%i</fileNamePattern>
+                <minIndex>1</minIndex>
+                <maxIndex>10</maxIndex>
+            </rollingPolicy>
+
+            <triggeringPolicy class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+                <maxFileSize>100MB</maxFileSize>
+            </triggeringPolicy>
+
+            <encoder>
+                <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{60} %X{sourceThread} - %msg%n</pattern>
+            </encoder>
+        </appender>
+
+        <!-- This affects logging for both user code and Flink -->
+        <root level="INFO">
+            <appender-ref ref="console"/>
+            <appender-ref ref="rolling"/>
+        </root>
+
+        <!-- Uncomment this if you want to only change Flink's logging -->
+        <!--<logger name="org.apache.flink" level="INFO"/>-->
+
+        <!-- The following lines keep the log level of common libraries/connectors on
+             log level INFO. The root logger does not override this. You have to manually
+             change the log levels here. -->
+        <logger name="akka" level="INFO"/>
+        <logger name="org.apache.kafka" level="INFO"/>
+        <logger name="org.apache.hadoop" level="INFO"/>
+        <logger name="org.apache.zookeeper" level="INFO"/>
+
+        <!-- Suppress the irrelevant (wrong) warnings from the Netty channel handler -->
+        <logger name="org.jboss.netty.channel.DefaultChannelPipeline" level="ERROR"/>
+    </configuration>
+
+  logback.xml: |
+    <configuration>
+        <appender name="file" class="ch.qos.logback.core.FileAppender">
+            <file>${log.file}</file>
+            <append>false</append>
+            <encoder>
+                <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{60} %X{sourceThread} - %msg%n</pattern>
+            </encoder>
+        </appender>
+
+        <!-- This affects logging for both user code and Flink -->
+        <root level="INFO">
+            <appender-ref ref="file"/>
+        </root>
+
+        <!-- Uncomment this if you want to only change Flink's logging -->
+        <!--<logger name="org.apache.flink" level="INFO">-->
+            <!--<appender-ref ref="file"/>-->
+        <!--</logger>-->
+
+        <!-- The following lines keep the log level of common libraries/connectors on
+             log level INFO. The root logger does not override this. You have to manually
+             change the log levels here. -->
+        <logger name="akka" level="INFO">
+            <appender-ref ref="file"/>
+        </logger>
+        <logger name="org.apache.kafka" level="INFO">
+            <appender-ref ref="file"/>
+        </logger>
+        <logger name="org.apache.hadoop" level="INFO">
+            <appender-ref ref="file"/>
+        </logger>
+        <logger name="org.apache.zookeeper" level="INFO">
+            <appender-ref ref="file"/>
+        </logger>
+
+        <!-- Suppress the irrelevant (wrong) warnings from the Netty channel handler -->
+        <logger name="org.jboss.netty.channel.DefaultChannelPipeline" level="ERROR">
+            <appender-ref ref="file"/>
+        </logger>
+    </configuration>
diff --git a/linkis-dist/helm/scripts/resources/ldh/configmaps/configmap-hadoop.yaml b/linkis-dist/helm/scripts/resources/ldh/configmaps/configmap-hadoop.yaml
new file mode 100644
index 000000000..354e23ef3
--- /dev/null
+++ b/linkis-dist/helm/scripts/resources/ldh/configmaps/configmap-hadoop.yaml
@@ -0,0 +1,659 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: hadoop-conf
+data:
+  log4j.properties: |
+    # Define some default values that can be overridden by system properties
+    hadoop.root.logger=INFO,console
+    hadoop.log.dir=/var/log/hadoop
+    hadoop.log.file=hadoop.log
+
+    # Define the root logger to the system property "hadoop.root.logger".
+    log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+    # Logging Threshold
+    log4j.threshold=ALL
+
+    # Null Appender
+    log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+    #
+    # Rolling File Appender - cap space usage at 5gb.
+    #
+    hadoop.log.maxfilesize=256MB
+    hadoop.log.maxbackupindex=20
+    log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+    log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+    log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
+    log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
+
+    log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+
+    # Pattern format: Date LogLevel LoggerName LogMessage
+    log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+    # Debugging Pattern format
+    #log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+    #
+    # Daily Rolling File Appender
+    #
+
+    log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+    log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+    # Rollover at midnight
+    log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+    log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+    # Pattern format: Date LogLevel LoggerName LogMessage
+    log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+    # Debugging Pattern format
+    #log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+    #
+    # console
+    # Add "console" to rootlogger above if you want to use this
+    #
+
+    log4j.appender.console=org.apache.log4j.ConsoleAppender
+    log4j.appender.console.target=System.err
+    log4j.appender.console.layout=org.apache.log4j.PatternLayout
+    log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+    #
+    # TaskLog Appender
+    #
+
+    #Default values
+    hadoop.tasklog.taskid=null
+    hadoop.tasklog.iscleanup=false
+    hadoop.tasklog.noKeepSplits=4
+    hadoop.tasklog.totalLogFileSize=100
+    hadoop.tasklog.purgeLogSplits=true
+    hadoop.tasklog.logsRetainHours=12
+
+    log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+    log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+    log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+    log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+    log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+    log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+    #
+    # HDFS block state change log from block manager
+    #
+    # Uncomment the following to suppress normal block state change
+    # messages from BlockManager in NameNode.
+    #log4j.logger.BlockStateChange=WARN
+
+    #
+    #Security appender
+    #
+    hadoop.security.logger=INFO,NullAppender
+    hadoop.security.log.maxfilesize=256MB
+    hadoop.security.log.maxbackupindex=20
+    log4j.category.SecurityLogger=${hadoop.security.logger}
+    hadoop.security.log.file=SecurityAuth-${user.name}.audit
+    log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+    log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+    log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+    log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+    log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+    log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+    #
+    # Daily Rolling Security appender
+    #
+    log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+    log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+    log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+    log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+    log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+    #
+    # hadoop configuration logging
+    #
+
+    # Uncomment the following line to turn off configuration deprecation warnings.
+    # log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+
+    #
+    # hdfs audit logging
+    #
+    hdfs.audit.logger=INFO,NullAppender
+    hdfs.audit.log.maxfilesize=256MB
+    hdfs.audit.log.maxbackupindex=20
+    log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+    log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+    log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
+    log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+    log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
+    log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+    log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
+    log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
+
+    #
+    # mapred audit logging
+    #
+    mapred.audit.logger=INFO,NullAppender
+    mapred.audit.log.maxfilesize=256MB
+    mapred.audit.log.maxbackupindex=20
+    log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+    log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+    log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender
+    log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+    log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+    log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+    log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}
+    log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}
+
+    # Custom Logging levels
+
+    #log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+    #log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+    #log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=DEBUG
+
+    # Jets3t library
+    log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+    # AWS SDK & S3A FileSystem
+    log4j.logger.com.amazonaws=ERROR
+    log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
+    log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN
+
+    #
+    # Event Counter Appender
+    # Sends counts of logging messages at different severity levels to Hadoop Metrics.
+    #
+    log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+    #
+    # Job Summary Appender
+    #
+    # Use following logger to send summary to separate file defined by
+    # hadoop.mapreduce.jobsummary.log.file :
+    # hadoop.mapreduce.jobsummary.logger=INFO,JSA
+    #
+    hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+    hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+    hadoop.mapreduce.jobsummary.log.maxfilesize=256MB
+    hadoop.mapreduce.jobsummary.log.maxbackupindex=20
+    log4j.appender.JSA=org.apache.log4j.RollingFileAppender
+    log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
+    log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize}
+    log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex}
+    log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+    log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+    log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
+    log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
+
+    #
+    # Yarn ResourceManager Application Summary Log
+    #
+    # Set the ResourceManager summary log filename
+    yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
+    # Set the ResourceManager summary log level and appender
+    yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
+    #yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
+
+    # To enable AppSummaryLogging for the RM,
+    # set yarn.server.resourcemanager.appsummary.logger to
+    # <LEVEL>,RMSUMMARY in hadoop-env.sh
+
+    # Appender for ResourceManager Application Summary Log
+    # Requires the following properties to be set
+    #    - hadoop.log.dir (Hadoop Log directory)
+    #    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
+    #    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
+
+    log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
+    log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
+    log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
+    log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
+    log4j.appender.RMSUMMARY.MaxFileSize=256MB
+    log4j.appender.RMSUMMARY.MaxBackupIndex=20
+    log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
+    log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+
+    # HS audit log configs
+    #mapreduce.hs.audit.logger=INFO,HSAUDIT
+    #log4j.logger.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=${mapreduce.hs.audit.logger}
+    #log4j.additivity.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=false
+    #log4j.appender.HSAUDIT=org.apache.log4j.DailyRollingFileAppender
+    #log4j.appender.HSAUDIT.File=${hadoop.log.dir}/hs-audit.log
+    #log4j.appender.HSAUDIT.layout=org.apache.log4j.PatternLayout
+    #log4j.appender.HSAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+    #log4j.appender.HSAUDIT.DatePattern=.yyyy-MM-dd
+
+    # Http Server Request Logs
+    #log4j.logger.http.requests.namenode=INFO,namenoderequestlog
+    #log4j.appender.namenoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+    #log4j.appender.namenoderequestlog.Filename=${hadoop.log.dir}/jetty-namenode-yyyy_mm_dd.log
+    #log4j.appender.namenoderequestlog.RetainDays=3
+
+    #log4j.logger.http.requests.datanode=INFO,datanoderequestlog
+    #log4j.appender.datanoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+    #log4j.appender.datanoderequestlog.Filename=${hadoop.log.dir}/jetty-datanode-yyyy_mm_dd.log
+    #log4j.appender.datanoderequestlog.RetainDays=3
+
+    #log4j.logger.http.requests.resourcemanager=INFO,resourcemanagerrequestlog
+    #log4j.appender.resourcemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+    #log4j.appender.resourcemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-resourcemanager-yyyy_mm_dd.log
+    #log4j.appender.resourcemanagerrequestlog.RetainDays=3
+
+    #log4j.logger.http.requests.jobhistory=INFO,jobhistoryrequestlog
+    #log4j.appender.jobhistoryrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+    #log4j.appender.jobhistoryrequestlog.Filename=${hadoop.log.dir}/jetty-jobhistory-yyyy_mm_dd.log
+    #log4j.appender.jobhistoryrequestlog.RetainDays=3
+
+    #log4j.logger.http.requests.nodemanager=INFO,nodemanagerrequestlog
+    #log4j.appender.nodemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+    #log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-yyyy_mm_dd.log
+    #log4j.appender.nodemanagerrequestlog.RetainDays=3
+
+  hadoop-env.sh: |
+    # Set Hadoop-specific environment variables here.
+
+    # The only required environment variable is JAVA_HOME.  All others are
+    # optional.  When running a distributed configuration it is best to
+    # set JAVA_HOME in this file, so that it is correctly defined on
+    # remote nodes.
+
+    # The java implementation to use.
+    export JAVA_HOME=/etc/alternatives/jre
+
+    # The jsvc implementation to use. Jsvc is required to run secure datanodes
+    # that bind to privileged ports to provide authentication of data transfer
+    # protocol.  Jsvc is not required if SASL is configured for authentication of
+    # data transfer protocol using non-privileged ports.
+    #export JSVC_HOME=${JSVC_HOME}
+
+    export HADOOP_CONF_DIR=/etc/ldh/hadoop
+
+    # Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
+    for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
+      if [ "$HADOOP_CLASSPATH" ]; then
+        export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
+      else
+        export HADOOP_CLASSPATH=$f
+      fi
+    done
+
+    # The maximum amount of heap to use, in MB. Default is 1000.
+    #export HADOOP_HEAPSIZE=
+    #export HADOOP_NAMENODE_INIT_HEAPSIZE=""
+
+    # Enable extra debugging of Hadoop's JAAS binding, used to set up
+    # Kerberos security.
+    # export HADOOP_JAAS_DEBUG=true
+
+    # Extra Java runtime options.  Empty by default.
+    # For Kerberos debugging, an extended option set logs more information
+    # export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true -Dsun.security.krb5.debug=true -Dsun.security.spnego.debug"
+    export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
+
+    # Command specific options appended to HADOOP_OPTS when specified
+    export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
+    export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
+
+    export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
+
+    export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
+    export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"
+
+    # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+    export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS"
+    # set heap args when HADOOP_HEAPSIZE is empty
+    if [ "$HADOOP_HEAPSIZE" = "" ]; then
+      export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
+    fi
+    #HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
+
+    # On secure datanodes, user to run the datanode as after dropping privileges.
+    # This **MUST** be uncommented to enable secure HDFS if using privileged ports
+    # to provide authentication of data transfer protocol.  This **MUST NOT** be
+    # defined if SASL is configured for authentication of data transfer protocol
+    # using non-privileged ports.
+    export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
+
+    # Where log files are stored.  $HADOOP_HOME/logs by default.
+    #export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
+    HADOOP_LOG_DIR=/var/log/hadoop
+
+    # Where log files are stored in the secure data environment.
+    #export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
+
+    ###
+    # HDFS Mover specific parameters
+    ###
+    # Specify the JVM options to be used when starting the HDFS Mover.
+    # These options will be appended to the options specified as HADOOP_OPTS
+    # and therefore may override any similar flags set in HADOOP_OPTS
+    #
+    # export HADOOP_MOVER_OPTS=""
+
+    ###
+    # Router-based HDFS Federation specific parameters
+    # Specify the JVM options to be used when starting the RBF Routers.
+    # These options will be appended to the options specified as HADOOP_OPTS
+    # and therefore may override any similar flags set in HADOOP_OPTS
+    #
+    # export HADOOP_DFSROUTER_OPTS=""
+    ###
+
+    ###
+    # Advanced Users Only!
+    ###
+
+    # The directory where pid files are stored. /tmp by default.
+    # NOTE: this should be set to a directory that can only be written to by
+    #       the user that will run the hadoop daemons.  Otherwise there is the
+    #       potential for a symlink attack.
+    export HADOOP_PID_DIR=${HADOOP_PID_DIR}
+    export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
+
+    # A string representing this instance of hadoop. $USER by default.
+    export HADOOP_IDENT_STRING=$USER
+
+  core-site.xml: |
+    <configuration>
+      <property>
+        <name>fs.defaultFS</name>
+        <value>hdfs://ldh.ldh.svc.cluster.local:9000</value>
+      </property>
+      <property>
+        <name>hadoop.tmp.dir</name>
+        <value>/data/hadoop</value>
+      </property>
+      <property>
+        <name>hadoop.proxyuser.root.groups</name>
+        <value>*</value>
+      </property>
+      <property>
+        <name>hadoop.proxyuser.root.hosts</name>
+        <value>*</value>
+      </property>
+      <property>
+        <name>hadoop.proxyuser.hadoop.groups</name>
+        <value>*</value>
+      </property>
+      <property>
+        <name>hadoop.proxyuser.hadoop.hosts</name>
+        <value>*</value>
+      </property>
+    </configuration>
+
+  hdfs-site.xml: |
+    <configuration>
+      <property>
+        <name>dfs.replication</name>
+        <value>1</value>
+      </property>
+    </configuration>
+  yarn-env.sh: |
+    # User for YARN daemons
+    export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
+
+    # resolve links - $0 may be a softlink
+    export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
+
+    # some Java parameters
+    # export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+    if [ "$JAVA_HOME" != "" ]; then
+      #echo "run java in $JAVA_HOME"
+      JAVA_HOME=$JAVA_HOME
+    fi
+
+    if [ "$JAVA_HOME" = "" ]; then
+      echo "Error: JAVA_HOME is not set."
+      exit 1
+    fi
+
+    JAVA=$JAVA_HOME/bin/java
+    JAVA_HEAP_MAX=-Xmx1000m
+
+    # For setting YARN specific HEAP sizes please use this
+    # Parameter and set appropriately
+    # YARN_HEAPSIZE=1000
+
+    # check envvars which might override default args
+    if [ "$YARN_HEAPSIZE" != "" ]; then
+      JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+    fi
+
+    # Resource Manager specific parameters
+
+    # Specify the max Heapsize for the ResourceManager using a numerical value
+    # in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+    # the value to 1000.
+    # This value will be overridden by an Xmx setting specified in either YARN_OPTS
+    # and/or YARN_RESOURCEMANAGER_OPTS.
+    # If not specified, the default value will be picked from either YARN_HEAPMAX
+    # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+    #export YARN_RESOURCEMANAGER_HEAPSIZE=1000
+
+    # Specify the max Heapsize for the timeline server using a numerical value
+    # in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+    # the value to 1000.
+    # This value will be overridden by an Xmx setting specified in either YARN_OPTS
+    # and/or YARN_TIMELINESERVER_OPTS.
+    # If not specified, the default value will be picked from either YARN_HEAPMAX
+    # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+    #export YARN_TIMELINESERVER_HEAPSIZE=1000
+
+    # Specify the JVM options to be used when starting the ResourceManager.
+    # These options will be appended to the options specified as YARN_OPTS
+    # and therefore may override any similar flags set in YARN_OPTS
+    #export YARN_RESOURCEMANAGER_OPTS=
+
+    # Node Manager specific parameters
+
+    # Specify the max Heapsize for the NodeManager using a numerical value
+    # in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+    # the value to 1000.
+    # This value will be overridden by an Xmx setting specified in either YARN_OPTS
+    # and/or YARN_NODEMANAGER_OPTS.
+    # If not specified, the default value will be picked from either YARN_HEAPMAX
+    # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+    #export YARN_NODEMANAGER_HEAPSIZE=1000
+
+    # Specify the JVM options to be used when starting the NodeManager.
+    # These options will be appended to the options specified as YARN_OPTS
+    # and therefore may override any similar flags set in YARN_OPTS
+    #export YARN_NODEMANAGER_OPTS=
+
+    # so that filenames w/ spaces are handled correctly in loops below
+    IFS=
+
+
+    # default log directory & file
+    if [ "$YARN_LOG_DIR" = "" ]; then
+      YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+    fi
+    if [ "$YARN_LOGFILE" = "" ]; then
+      YARN_LOGFILE='yarn.log'
+    fi
+
+    # default policy file for service-level authorization
+    if [ "$YARN_POLICYFILE" = "" ]; then
+      YARN_POLICYFILE="hadoop-policy.xml"
+    fi
+
+    # restore ordinary behaviour
+    unset IFS
+
+
+    YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
+    YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
+    YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
+    YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
+    YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
+    YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
+    YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+    YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+    if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+      YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+    fi
+    YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"
+
+    ###
+    # Router specific parameters
+    ###
+
+    # Specify the JVM options to be used when starting the Router.
+    # These options will be appended to the options specified as HADOOP_OPTS
+    # and therefore may override any similar flags set in HADOOP_OPTS
+    #
+    # See ResourceManager for some examples
+    #
+    #export YARN_ROUTER_OPTS=
+  yarn-site.xml: |
+    <configuration>
+       <property>
+           <name>yarn.nodemanager.aux-services</name>
+           <value>mapreduce_shuffle</value>
+       </property>
+    </configuration>
+  capacity-scheduler.xml: |
+    <configuration>
+
+      <property>
+        <name>yarn.scheduler.capacity.maximum-applications</name>
+        <value>10000</value>
+        <description>
+          Maximum number of applications that can be pending and running.
+        </description>
+      </property>
+
+      <property>
+        <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
+        <value>0.1</value>
+        <description>
+          Maximum percent of resources in the cluster which can be used to run
+          application masters, i.e. it controls the number of concurrently running
+          applications.
+        </description>
+      </property>
+
+      <property>
+        <name>yarn.scheduler.capacity.resource-calculator</name>
+        <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
+        <description>
+          The ResourceCalculator implementation to be used to compare
+          Resources in the scheduler.
+          The default i.e. DefaultResourceCalculator only uses Memory while
+          DominantResourceCalculator uses dominant-resource to compare
+          multi-dimensional resources such as Memory, CPU etc.
+        </description>
+      </property>
+
+      <property>
+        <name>yarn.scheduler.capacity.root.queues</name>
+        <value>default</value>
+        <description>
+          The queues at this level (root is the root queue).
+        </description>
+      </property>
+
+      <property>
+        <name>yarn.scheduler.capacity.root.default.capacity</name>
+        <value>100</value>
+        <description>Default queue target capacity.</description>
+      </property>
+
+      <property>
+        <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
+        <value>1</value>
+        <description>
+          Default queue user limit, a percentage from 0.0 to 1.0.
+        </description>
+      </property>
+
+      <property>
+        <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
+        <value>100</value>
+        <description>
+          The maximum capacity of the default queue.
+        </description>
+      </property>
+
+      <property>
+        <name>yarn.scheduler.capacity.root.default.state</name>
+        <value>RUNNING</value>
+        <description>
+          The state of the default queue. State can be one of RUNNING or STOPPED.
+        </description>
+      </property>
+
+      <property>
+        <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
+        <value>*</value>
+        <description>
+          The ACL of who can submit jobs to the default queue.
+        </description>
+      </property>
+
+      <property>
+        <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
+        <value>*</value>
+        <description>
+          The ACL of who can administer jobs on the default queue.
+        </description>
+      </property>
+
+      <property>
+        <name>yarn.scheduler.capacity.node-locality-delay</name>
+        <value>40</value>
+        <description>
+          Number of missed scheduling opportunities after which the CapacityScheduler
+          attempts to schedule rack-local containers.
+          Typically this should be set to the number of nodes in the cluster. By default it is set
+          to approximately the number of nodes in one rack, which is 40.
+        </description>
+      </property>
+
+      <property>
+        <name>yarn.scheduler.capacity.queue-mappings</name>
+        <value></value>
+        <description>
+          A list of mappings that will be used to assign jobs to queues.
+          The syntax for this list is [u|g]:[name]:[queue_name][,next mapping]*
+          Typically this list will be used to map users to queues,
+          for example, u:%user:%user maps all users to queues with the same name
+          as the user.
+        </description>
+      </property>
+
+      <property>
+        <name>yarn.scheduler.capacity.queue-mappings-override.enable</name>
+        <value>false</value>
+        <description>
+          If a queue mapping is present, will it override the value specified
+          by the user? This can be used by administrators to place jobs in queues
+          that are different than the one specified by the user.
+          The default is false.
+        </description>
+      </property>
+
+    </configuration>
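
The configmap above gives the LDH container a single-node Hadoop layout: HDFS with dfs.replication=1 at hdfs://ldh.ldh.svc.cluster.local:9000, and YARN using the capacity scheduler with a single "default" queue. As a rough smoke test, assuming the Deployment named "ldh" in namespace "ldh" from ldh.yaml later in this patch is running and the hadoop CLI is on the PATH inside the image:

    kubectl -n ldh exec -it deploy/ldh -- bash -c '
      hdfs dfsadmin -report      # NameNode should report one live DataNode
      hdfs dfs -mkdir -p /tmp/smoke && hdfs dfs -ls /tmp
      yarn node -list            # NodeManager should be registered with the ResourceManager
    '
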
diff --git a/linkis-dist/helm/scripts/resources/ldh/configmaps/configmap-hive.yaml b/linkis-dist/helm/scripts/resources/ldh/configmaps/configmap-hive.yaml
new file mode 100644
index 000000000..55826de7e
--- /dev/null
+++ b/linkis-dist/helm/scripts/resources/ldh/configmaps/configmap-hive.yaml
@@ -0,0 +1,230 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: hive-conf
+data:
+  hive-env.sh: |
+    # Hive Client memory usage can be an issue if a large number of clients
+    # are running at the same time. The flags below have been useful in
+    # reducing memory usage:
+    #
+    # if [ "$SERVICE" = "cli" ]; then
+    #   if [ -z "$DEBUG" ]; then
+    #     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+    #   else
+    #     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+    #   fi
+    # fi
+
+    # The heap size of the JVM started by the hive shell script can be controlled via:
+    #
+    # export HADOOP_HEAPSIZE=1024
+    #
+    # Larger heap size may be required when running queries over a large number of files or partitions.
+    # By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
+    # appropriate for hive server.
+
+
+    # Set HADOOP_HOME to point to a specific hadoop install directory
+    HADOOP_HOME=/opt/ldh/current/hadoop
+
+    # Hive Configuration Directory can be controlled by:
+    export HIVE_CONF_DIR=/etc/ldh/hive
+
+    # Folder containing extra libraries required for hive compilation/execution can be controlled by:
+    # export HIVE_AUX_JARS_PATH=
+
+  hive-site.xml: |
+    <?xml version="1.0" encoding="UTF-8" standalone="no"?>
+    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+    <configuration>
+      <property>
+          <name>javax.jdo.option.ConnectionURL</name>
+          <value>jdbc:mysql://mysql.mysql.svc.cluster.local:3306/hive_metadata?&amp;createDatabaseIfNotExist=true&amp;characterEncoding=UTF-8&amp;useSSL=false</value>
+      </property>
+      <property>
+          <name>javax.jdo.option.ConnectionUserName</name>
+          <value>root</value>
+      </property>
+      <property>
+          <name>javax.jdo.option.ConnectionPassword</name>
+          <value>123456</value>
+      </property>
+      <property>
+          <name>javax.jdo.option.ConnectionDriverName</name>
+          <value>com.mysql.jdbc.Driver</value>
+      </property>
+      <property>
+          <name>datanucleus.schema.autoCreateAll</name>
+          <value>true</value>
+      </property>
+      <property>
+          <name>hive.metastore.schema.verification</name>
+          <value>false</value>
+      </property>
+      <property>
+        <name>hive.metastore.uris</name>
+        <value>thrift://ldh.ldh.svc.cluster.local:9083</value>
+      </property>
+    </configuration>
+
+  beeline-log4j2.properties: |
+    status = INFO
+    name = BeelineLog4j2
+    packages = org.apache.hadoop.hive.ql.log
+
+    # list of properties
+    property.hive.log.level = WARN
+    property.hive.root.logger = console
+
+    # list of all appenders
+    appenders = console
+
+    # console appender
+    appender.console.type = Console
+    appender.console.name = console
+    appender.console.target = SYSTEM_ERR
+    appender.console.layout.type = PatternLayout
+    appender.console.layout.pattern = %d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n
+
+    # list of all loggers
+    loggers = HiveConnection
+
+    # HiveConnection logs useful info for dynamic service discovery
+    logger.HiveConnection.name = org.apache.hive.jdbc.HiveConnection
+    logger.HiveConnection.level = INFO
+
+    # root logger
+    rootLogger.level = ${sys:hive.log.level}
+    rootLogger.appenderRefs = root
+    rootLogger.appenderRef.root.ref = ${sys:hive.root.logger}
+
+  hive-exec-log4j2.properties: |
+    status = INFO
+    name = HiveExecLog4j2
+    packages = org.apache.hadoop.hive.ql.log
+
+    # list of properties
+    property.hive.log.level = INFO
+    property.hive.root.logger = FA
+    property.hive.query.id = hadoop
+    property.hive.log.dir = /var/log/hive/${sys:user.name}
+    property.hive.log.file = ${sys:hive.query.id}.log
+
+    # list of all appenders
+    appenders = console, FA
+
+    # console appender
+    appender.console.type = Console
+    appender.console.name = console
+    appender.console.target = SYSTEM_ERR
+    appender.console.layout.type = PatternLayout
+    appender.console.layout.pattern = %d{ISO8601} %5p [%t] %c{2}: %m%n
+
+    # simple file appender
+    appender.FA.type = RandomAccessFile
+    appender.FA.name = FA
+    appender.FA.fileName = ${sys:hive.log.dir}/${sys:hive.log.file}
+    appender.FA.layout.type = PatternLayout
+    appender.FA.layout.pattern = %d{ISO8601} %5p [%t] %c{2}: %m%n
+
+    # list of all loggers
+    loggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX
+
+    logger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn
+    logger.NIOServerCnxn.level = WARN
+
+    logger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO
+    logger.ClientCnxnSocketNIO.level = WARN
+
+    logger.DataNucleus.name = DataNucleus
+    logger.DataNucleus.level = ERROR
+
+    logger.Datastore.name = Datastore
+    logger.Datastore.level = ERROR
+
+    logger.JPOX.name = JPOX
+    logger.JPOX.level = ERROR
+
+    # root logger
+    rootLogger.level = ${sys:hive.log.level}
+    rootLogger.appenderRefs = root
+    rootLogger.appenderRef.root.ref = ${sys:hive.root.logger}
+
+  hive-log4j2.properties: |
+    status = INFO
+    name = HiveLog4j2
+    packages = org.apache.hadoop.hive.ql.log
+
+    # list of properties
+    property.hive.log.level = INFO
+    property.hive.root.logger = DRFA
+    property.hive.log.dir = /var/log/hive/${sys:user.name}
+    property.hive.log.file = hive.log
+    property.hive.perflogger.log.level = INFO
+
+    # list of all appenders
+    appenders = console, DRFA
+
+    # console appender
+    appender.console.type = Console
+    appender.console.name = console
+    appender.console.target = SYSTEM_ERR
+    appender.console.layout.type = PatternLayout
+    appender.console.layout.pattern = %d{ISO8601} %5p [%t] %c{2}: %m%n
+
+    # daily rolling file appender
+    appender.DRFA.type = RollingRandomAccessFile
+    appender.DRFA.name = DRFA
+    appender.DRFA.fileName = ${sys:hive.log.dir}/${sys:hive.log.file}
+    # Use %pid in the filePattern to append <process-id>@<host-name> to the filename if you want separate log files for different CLI sessions
+    appender.DRFA.filePattern = ${sys:hive.log.dir}/${sys:hive.log.file}.%d{yyyy-MM-dd}
+    appender.DRFA.layout.type = PatternLayout
+    appender.DRFA.layout.pattern = %d{ISO8601} %5p [%t] %c{2}: %m%n
+    appender.DRFA.policies.type = Policies
+    appender.DRFA.policies.time.type = TimeBasedTriggeringPolicy
+    appender.DRFA.policies.time.interval = 1
+    appender.DRFA.policies.time.modulate = true
+    appender.DRFA.strategy.type = DefaultRolloverStrategy
+    appender.DRFA.strategy.max = 30
+
+    # list of all loggers
+    loggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX, PerfLogger
+
+    logger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn
+    logger.NIOServerCnxn.level = WARN
+
+    logger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO
+    logger.ClientCnxnSocketNIO.level = WARN
+
+    logger.DataNucleus.name = DataNucleus
+    logger.DataNucleus.level = ERROR
+
+    logger.Datastore.name = Datastore
+    logger.Datastore.level = ERROR
+
+    logger.JPOX.name = JPOX
+    logger.JPOX.level = ERROR
+
+    logger.PerfLogger.name = org.apache.hadoop.hive.ql.log.PerfLogger
+    logger.PerfLogger.level = ${sys:hive.perflogger.log.level}
+
+    # root logger
+    rootLogger.level = ${sys:hive.log.level}
+    rootLogger.appenderRefs = root
+    rootLogger.appenderRef.root.ref = ${sys:hive.root.logger}
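
With the hive-site.xml above, the metastore schema lives in the MySQL service at mysql.mysql.svc.cluster.local:3306 and the metastore/HiveServer2 endpoints are published on ldh.ldh.svc.cluster.local (ports 9083 and 10000 in ldh.yaml below). A hedged connectivity check, assuming beeline ships in the LDH image and HiveServer2 is started by the image's start-all.sh:

    kubectl -n ldh exec -it deploy/ldh -- beeline \
      -u jdbc:hive2://ldh.ldh.svc.cluster.local:10000/default \
      -e 'show databases;'
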
diff --git a/linkis-dist/helm/scripts/resources/ldh/configmaps/configmap-spark.yaml b/linkis-dist/helm/scripts/resources/ldh/configmaps/configmap-spark.yaml
new file mode 100644
index 000000000..5d5185bfd
--- /dev/null
+++ b/linkis-dist/helm/scripts/resources/ldh/configmaps/configmap-spark.yaml
@@ -0,0 +1,208 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: spark-conf
+data:
+  spark-env.sh: |
+    # This file is sourced when running various Spark programs.
+    # Copy it as spark-env.sh and edit that to configure Spark for your site.
+
+    # Options read when launching programs locally with
+    # ./bin/run-example or ./bin/spark-submit
+    # - HADOOP_CONF_DIR, to point Spark towards Hadoop configuration files
+    # - SPARK_LOCAL_IP, to set the IP address Spark binds to on this node
+    # - SPARK_PUBLIC_DNS, to set the public dns name of the driver program
+
+    # Options read by executors and drivers running inside the cluster
+    # - SPARK_LOCAL_IP, to set the IP address Spark binds to on this node
+    # - SPARK_PUBLIC_DNS, to set the public DNS name of the driver program
+    # - SPARK_LOCAL_DIRS, storage directories to use on this node for shuffle and RDD data
+    # - MESOS_NATIVE_JAVA_LIBRARY, to point to your libmesos.so if you use Mesos
+
+    # Options read in any mode
+    # - SPARK_CONF_DIR, Alternate conf dir. (Default: ${SPARK_HOME}/conf)
+    # - SPARK_EXECUTOR_CORES, Number of cores for the executors (Default: 1).
+    # - SPARK_EXECUTOR_MEMORY, Memory per Executor (e.g. 1000M, 2G) (Default: 1G)
+    # - SPARK_DRIVER_MEMORY, Memory for Driver (e.g. 1000M, 2G) (Default: 1G)
+
+    # Options read in any cluster manager using HDFS
+    # - HADOOP_CONF_DIR, to point Spark towards Hadoop configuration files
+
+    # Options read in YARN client/cluster mode
+    # - YARN_CONF_DIR, to point Spark towards YARN configuration files when you use YARN
+
+    # Options for the daemons used in the standalone deploy mode
+    # - SPARK_MASTER_HOST, to bind the master to a different IP address or hostname
+    # - SPARK_MASTER_PORT / SPARK_MASTER_WEBUI_PORT, to use non-default ports for the master
+    # - SPARK_MASTER_OPTS, to set config properties only for the master (e.g. "-Dx=y")
+    # - SPARK_WORKER_CORES, to set the number of cores to use on this machine
+    # - SPARK_WORKER_MEMORY, to set how much total memory workers have to give executors (e.g. 1000m, 2g)
+    # - SPARK_WORKER_PORT / SPARK_WORKER_WEBUI_PORT, to use non-default ports for the worker
+    # - SPARK_WORKER_DIR, to set the working directory of worker processes
+    # - SPARK_WORKER_OPTS, to set config properties only for the worker (e.g. "-Dx=y")
+    # - SPARK_DAEMON_MEMORY, to allocate to the master, worker and history server themselves (default: 1g).
+    # - SPARK_HISTORY_OPTS, to set config properties only for the history server (e.g. "-Dx=y")
+    # - SPARK_SHUFFLE_OPTS, to set config properties only for the external shuffle service (e.g. "-Dx=y")
+    # - SPARK_DAEMON_JAVA_OPTS, to set config properties for all daemons (e.g. "-Dx=y")
+    # - SPARK_DAEMON_CLASSPATH, to set the classpath for all daemons
+    # - SPARK_PUBLIC_DNS, to set the public dns name of the master or workers
+
+    # Options for launcher
+    # - SPARK_LAUNCHER_OPTS, to set config properties and Java options for the launcher (e.g. "-Dx=y")
+
+    # Generic options for the daemons used in the standalone deploy mode
+    # - SPARK_CONF_DIR      Alternate conf dir. (Default: ${SPARK_HOME}/conf)
+    # - SPARK_LOG_DIR       Where log files are stored.  (Default: ${SPARK_HOME}/logs)
+    # - SPARK_LOG_MAX_FILES Max log files of Spark daemons can rotate to. Default is 5.
+    # - SPARK_PID_DIR       Where the pid file is stored. (Default: /tmp)
+    # - SPARK_IDENT_STRING  A string representing this instance of spark. (Default: $USER)
+    # - SPARK_NICENESS      The scheduling priority for daemons. (Default: 0)
+    # - SPARK_NO_DAEMONIZE  Run the proposed command in the foreground. It will not output a PID file.
+    SPARK_LOG_DIR=/var/log/spark
+
+    # Options for native BLAS, like Intel MKL, OpenBLAS, and so on.
+    # You might get better performance by enabling these options if using native BLAS (see SPARK-21305).
+    # - MKL_NUM_THREADS=1        Disable multi-threading of Intel MKL
+    # - OPENBLAS_NUM_THREADS=1   Disable multi-threading of OpenBLAS
+
+  spark-defaults.conf: |
+    # Default system properties included when running spark-submit.
+    # This is useful for setting default environmental settings.
+
+    spark.master yarn
+    spark.driver.extraLibraryPath /opt/ldh/current/hadoop/lib/native/
+    spark.eventLog.dir hdfs:///spark2-history/
+    spark.eventLog.enabled true
+    spark.executor.extraJavaOptions -XX:+UseNUMA
+    spark.executor.extraLibraryPath /opt/ldh/current/hadoop/lib/native
+    spark.history.fs.cleaner.enabled true
+    spark.history.fs.cleaner.interval 7d
+    spark.history.fs.cleaner.maxAge 90d
+    spark.history.fs.logDirectory hdfs:///spark2-history/
+    spark.history.kerberos.keytab none
+    spark.history.kerberos.principal none
+    spark.history.provider org.apache.spark.deploy.history.FsHistoryProvider
+    spark.history.store.path /data/spark/shs_db
+    spark.history.ui.port 18081
+    spark.io.compression.lz4.blockSize 128kb
+    spark.shuffle.file.buffer 1m
+    spark.shuffle.io.backLog 8192
+    spark.shuffle.io.serverThreads 128
+    spark.shuffle.unsafe.file.output.buffer 5m
+    spark.sql.autoBroadcastJoinThreshold 26214400
+    spark.sql.hive.convertMetastoreOrc true
+    spark.sql.hive.metastore.jars /opt/ldh/current/spark/jars/*
+    spark.sql.hive.metastore.version 2.3.3
+    spark.sql.orc.filterPushdown true
+    spark.sql.orc.impl native
+    spark.sql.queryExecutionListeners
+    spark.sql.statistics.fallBackToHdfs true
+    spark.sql.streaming.streamingQueryListeners
+    spark.sql.warehouse.dir /warehouse/tablespace/managed/hive
+    spark.unsafe.sorter.spill.reader.buffer.size 1m
+    spark.yarn.dist.files
+    spark.yarn.historyServer.address ldh.ldh.svc.cluster.local:18081
+    spark.yarn.queue default
+
+  log4j2.properties: |
+    # Set everything to be logged to the console
+    rootLogger.level = info
+    rootLogger.appenderRef.stdout.ref = console
+
+    # In the pattern layout configuration below, we specify an explicit `%ex` conversion
+    # pattern for logging Throwables. If this was omitted, then (by default) Log4J would
+    # implicitly add an `%xEx` conversion pattern which logs stacktraces with additional
+    # class packaging information. That extra information can sometimes add a substantial
+    # performance overhead, so we disable it in our default logging config.
+    # For more information, see SPARK-39361.
+    appender.console.type = Console
+    appender.console.name = console
+    appender.console.target = SYSTEM_ERR
+    appender.console.layout.type = PatternLayout
+    appender.console.layout.pattern = %d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n%ex
+
+    # Set the default spark-shell/spark-sql log level to WARN. When running the
+    # spark-shell/spark-sql, the log level for these classes is used to overwrite
+    # the root logger's log level, so that the user can have different defaults
+    # for the shell and regular Spark apps.
+    logger.repl.name = org.apache.spark.repl.Main
+    logger.repl.level = warn
+
+    logger.thriftserver.name = org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver
+    logger.thriftserver.level = warn
+
+    # Settings to quiet third party logs that are too verbose
+    logger.jetty1.name = org.sparkproject.jetty
+    logger.jetty1.level = warn
+    logger.jetty2.name = org.sparkproject.jetty.util.component.AbstractLifeCycle
+    logger.jetty2.level = error
+    logger.replexprTyper.name = org.apache.spark.repl.SparkIMain$exprTyper
+    logger.replexprTyper.level = info
+    logger.replSparkILoopInterpreter.name = org.apache.spark.repl.SparkILoop$SparkILoopInterpreter
+    logger.replSparkILoopInterpreter.level = info
+    logger.parquet1.name = org.apache.parquet
+    logger.parquet1.level = error
+    logger.parquet2.name = parquet
+    logger.parquet2.level = error
+
+    # SPARK-9183: Settings to avoid annoying messages when looking up nonexistent UDFs in SparkSQL with Hive support
+    logger.RetryingHMSHandler.name = org.apache.hadoop.hive.metastore.RetryingHMSHandler
+    logger.RetryingHMSHandler.level = fatal
+    logger.FunctionRegistry.name = org.apache.hadoop.hive.ql.exec.FunctionRegistry
+    logger.FunctionRegistry.level = error
+
+    # For deploying Spark ThriftServer
+    # SPARK-34128: Suppress undesirable TTransportException warnings involved in THRIFT-4805
+    appender.console.filter.1.type = RegexFilter
+    appender.console.filter.1.regex = .*Thrift error occurred during processing of message.*
+    appender.console.filter.1.onMatch = deny
+    appender.console.filter.1.onMismatch = neutral
+
+  hive-site.xml: |
+    <?xml version="1.0" encoding="UTF-8" standalone="no"?>
+    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+    <configuration>
+      <property>
+          <name>javax.jdo.option.ConnectionURL</name>
+          <value>jdbc:mysql://mysql.mysql.svc.cluster.local:3306/hive_metadata?&amp;createDatabaseIfNotExist=true&amp;characterEncoding=UTF-8&amp;useSSL=false</value>
+      </property>
+      <property>
+          <name>javax.jdo.option.ConnectionUserName</name>
+          <value>root</value>
+      </property>
+      <property>
+          <name>javax.jdo.option.ConnectionPassword</name>
+          <value>123456</value>
+      </property>
+      <property>
+          <name>javax.jdo.option.ConnectionDriverName</name>
+          <value>com.mysql.jdbc.Driver</value>
+      </property>
+      <property>
+          <name>datanucleus.schema.autoCreateAll</name>
+          <value>true</value>
+      </property>
+      <property>
+          <name>hive.metastore.schema.verification</name>
+          <value>false</value>
+      </property>
+      <property>
+        <name>hive.metastore.uris</name>
+        <value>thrift://ldh.ldh.svc.cluster.local:9083</value>
+      </property>
+    </configuration>
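
Because spark-defaults.conf sets spark.master to yarn and writes event logs to hdfs:///spark2-history/, running the stock SparkPi example exercises Spark, YARN and HDFS together. This is only a sketch: the /opt/ldh/current/spark path follows the layout already referenced by spark.sql.hive.metastore.jars above, and the examples jar name depends on the Spark build bundled into the image:

    kubectl -n ldh exec -it deploy/ldh -- /opt/ldh/current/spark/bin/spark-submit \
      --class org.apache.spark.examples.SparkPi \
      --master yarn --deploy-mode client \
      /opt/ldh/current/spark/examples/jars/spark-examples_*.jar 10

A completed run should then be visible on the Spark history server port 18081 exposed by the Service below, assuming start-all.sh launches it.
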
diff --git a/linkis-dist/helm/scripts/resources/ldh/configmaps/configmap-zookeeper.yaml b/linkis-dist/helm/scripts/resources/ldh/configmaps/configmap-zookeeper.yaml
new file mode 100644
index 000000000..a3ad5fe17
--- /dev/null
+++ b/linkis-dist/helm/scripts/resources/ldh/configmaps/configmap-zookeeper.yaml
@@ -0,0 +1,98 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: zookeeper-conf
+data:
+  zoo.cfg: |
+    # The number of milliseconds of each tick
+    tickTime=2000
+    # The number of ticks that the initial
+    # synchronization phase can take
+    initLimit=10
+    # The number of ticks that can pass between
+    # sending a request and getting an acknowledgement
+    syncLimit=5
+    # the directory where the snapshot is stored.
+    # do not use /tmp for storage; /tmp here is just
+    # an example.
+    dataDir=/data/zookeeper
+    # the port at which the clients will connect
+    clientPort=2181
+    # the maximum number of client connections.
+    # increase this if you need to handle more clients
+    #maxClientCnxns=60
+    #
+    # Be sure to read the maintenance section of the
+    # administrator guide before turning on autopurge.
+    #
+    # http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
+    #
+    # The number of snapshots to retain in dataDir
+    #autopurge.snapRetainCount=3
+    # Purge task interval in hours
+    # Set to "0" to disable auto purge feature
+    #autopurge.purgeInterval=1
+
+  log4j.properties: |
+    # Define some default values that can be overridden by system properties
+    zookeeper.root.logger=INFO, CONSOLE
+
+    zookeeper.console.threshold=WARN
+
+    zookeeper.log.dir=/var/log/zookeeper
+    zookeeper.log.file=zookeeper.log
+    zookeeper.log.threshold=INFO
+    zookeeper.log.maxfilesize=256MB
+    zookeeper.log.maxbackupindex=20
+
+    zookeeper.tracelog.dir=${zookeeper.log.dir}
+    zookeeper.tracelog.file=zookeeper_trace.log
+
+    log4j.rootLogger=${zookeeper.root.logger}
+
+    #
+    # console
+    # Add "console" to rootlogger above if you want to use this
+    #
+    log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
+    log4j.appender.CONSOLE.Threshold=${zookeeper.console.threshold}
+    log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
+    log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
+
+    #
+    # Add ROLLINGFILE to rootLogger to get log file output
+    #
+    log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
+    log4j.appender.ROLLINGFILE.Threshold=${zookeeper.log.threshold}
+    log4j.appender.ROLLINGFILE.File=${zookeeper.log.dir}/${zookeeper.log.file}
+    log4j.appender.ROLLINGFILE.MaxFileSize=${zookeeper.log.maxfilesize}
+    log4j.appender.ROLLINGFILE.MaxBackupIndex=${zookeeper.log.maxbackupindex}
+    log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
+    log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
+
+    #
+    # Add TRACEFILE to rootLogger to get log file output
+    #    Log TRACE level and above messages to a log file
+    #
+    log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
+    log4j.appender.TRACEFILE.Threshold=TRACE
+    log4j.appender.TRACEFILE.File=${zookeeper.tracelog.dir}/${zookeeper.tracelog.file}
+
+    log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
+    ### Notice we are including log4j's NDC here (%x)
+    log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L][%x] - %m%n
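
The zoo.cfg above runs a standalone ZooKeeper on client port 2181 with data under /data/zookeeper. A quick check from inside the LDH pod, with the zkCli.sh location being an assumption that follows the /opt/ldh/current/<component> convention used for the other components:

    kubectl -n ldh exec -it deploy/ldh -- /opt/ldh/current/zookeeper/bin/zkCli.sh \
      -server ldh.ldh.svc.cluster.local:2181 ls /
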
diff --git a/linkis-dist/helm/scripts/resources/ldh/ldh.yaml b/linkis-dist/helm/scripts/resources/ldh/ldh.yaml
new file mode 100644
index 000000000..6c2437e80
--- /dev/null
+++ b/linkis-dist/helm/scripts/resources/ldh/ldh.yaml
@@ -0,0 +1,216 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: ldh
+spec:
+  selector:
+    app: ldh
+  clusterIP: None
+  ports:
+    # hdfs
+    - name: nn-webui
+      port: 50070
+    - name: nn-ipc
+      port: 9000
+    - name: dn-webui
+      port: 50075
+    - name: dn-ipc
+      port: 50020
+    - name: dn-tf
+      port: 50010
+    # yarn
+    - name: rm-scheduler
+      port: 8030
+    - name: rm-tracker
+      port: 8031
+    - name: rm-ipc
+      port: 8032
+    - name: rm-admin
+      port: 8033
+    - name: nm-localizer
+      port: 8040
+    - name: nm-webui
+      port: 8042
+    - name: rm-webui
+      port: 8088
+    # hive
+    - name: metastore
+      port: 9083
+    - name: hiveserver2
+      port: 10000
+    # spark
+    - name: spark-hs
+      port: 18081
+    # flink
+    - name: flink-yarn-jm
+      port: 8090
+    # zookeeper
+    - name: zk-client
+      port: 2181
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: ldh
+spec:
+  selector:
+    matchLabels:
+      app: ldh
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      labels:
+        app: ldh
+    spec:
+      volumes:
+        - name: data-dir
+          emptyDir: {}
+        - name: hadoop-conf
+          configMap:
+            name: hadoop-conf
+            items:
+              - key: hadoop-env.sh
+                path: hadoop-env.sh
+              - key: core-site.xml
+                path: core-site.xml
+              - key: hdfs-site.xml
+                path: hdfs-site.xml
+              - key: yarn-env.sh
+                path: yarn-env.sh
+              - key: yarn-site.xml
+                path: yarn-site.xml
+              - key: capacity-scheduler.xml
+                path: capacity-scheduler.xml
+              - key: log4j.properties
+                path: log4j.properties
+        - name: hive-conf
+          configMap:
+            name: hive-conf
+            items:
+              - key: hive-env.sh
+                path: hive-env.sh
+              - key: hive-site.xml
+                path: hive-site.xml
+              - key: hive-log4j2.properties
+                path: hive-log4j2.properties
+              - key: beeline-log4j2.properties
+                path: beeline-log4j2.properties
+              - key: hive-exec-log4j2.properties
+                path: hive-exec-log4j2.properties
+        - name: spark-conf
+          configMap:
+            name: spark-conf
+            items:
+              - key: spark-env.sh
+                path: spark-env.sh
+              - key: hive-site.xml
+                path: hive-site.xml
+              - key: spark-defaults.conf
+                path: spark-defaults.conf
+              - key: log4j2.properties
+                path: log4j2.properties
+        - name: flink-conf
+          configMap:
+            name: flink-conf
+            items:
+              - key: flink-conf.yaml
+                path: flink-conf.yaml
+              - key: log4j-cli.properties
+                path: log4j-cli.properties
+              - key: log4j-console.properties
+                path: log4j-console.properties
+              - key: log4j-session.properties
+                path: log4j-session.properties
+              - key: log4j.properties
+                path: log4j.properties
+              - key: logback-console.xml
+                path: logback-console.xml
+              - key: logback-session.xml
+                path: logback-session.xml
+              - key: logback.xml
+                path: logback.xml
+        - name: zookeeper-conf
+          configMap:
+            name: zookeeper-conf
+            items:
+              - key: zoo.cfg
+                path: zoo.cfg
+              - key: log4j.properties
+                path: log4j.properties
+      containers:
+        - name: ldh
+          image: linkis-ldh:${LDH_VERSION}
+          command:
+            - /bin/bash
+            - -ecx
+            - exec /usr/bin/start-all.sh
+          ports:
+            # hdfs
+            - name: nn-webui
+              containerPort: 50070
+            - name: nn-ipc
+              containerPort: 9000
+            - name: dn-webui
+              containerPort: 50075
+            - name: dn-ipc
+              containerPort: 50020
+            - name: dn-tf
+              containerPort: 50010
+            # yarn
+            - name: rm-scheduler
+              containerPort: 8030
+            - name: rm-tracker
+              containerPort: 8031
+            - name: rm-ipc
+              containerPort: 8032
+            - name: rm-admin
+              containerPort: 8033
+            - name: nm-localizer
+              containerPort: 8040
+            - name: nm-webui
+              containerPort: 8042
+            - name: rm-webui
+              containerPort: 8088
+            # hive
+            - name: metastore
+              containerPort: 9083
+            - name: hiveserver2
+              containerPort: 10000
+            # spark
+            - name: spark-hs
+              containerPort: 18081
+            # flink
+            - name: flink-yarn-jm
+              containerPort: 8090
+            # zookeeper
+            - name: zk-client
+              containerPort: 2181
+          volumeMounts:
+            - name: data-dir
+              mountPath: /data
+            - name: hadoop-conf
+              mountPath: /etc/ldh/hadoop
+            - name: hive-conf
+              mountPath: /etc/ldh/hive
+            - name: spark-conf
+              mountPath: /etc/ldh/spark
+            - name: flink-conf
+              mountPath: /etc/ldh/flink
+            - name: zookeeper-conf
+              mountPath: /etc/ldh/zookeeper
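
The scripts under linkis-dist/helm/scripts (install-ldh.sh in particular) are the intended way to roll this manifest out, but it can also be applied by hand for experimentation. A rough sketch, assuming a namespace named "ldh" (to match the ldh.ldh.svc.cluster.local addresses used throughout) and using envsubst to fill in the ${LDH_VERSION} image tag with whatever version the -Pdocker build produced; install-ldh.sh may perform the substitution differently:

    kubectl create namespace ldh
    kubectl -n ldh apply -f linkis-dist/helm/scripts/resources/ldh/configmaps/
    LDH_VERSION=1.3.0 envsubst < linkis-dist/helm/scripts/resources/ldh/ldh.yaml \
      | kubectl -n ldh apply -f -
    # forward the YARN ResourceManager web UI to localhost:8088
    kubectl -n ldh port-forward deploy/ldh 8088:8088
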
diff --git a/linkis-dist/pom.xml b/linkis-dist/pom.xml
index 545fb20ba..1a20820f5 100644
--- a/linkis-dist/pom.xml
+++ b/linkis-dist/pom.xml
@@ -232,6 +232,12 @@
                 <linkis.home>/opt/linkis</linkis.home>
                 <linkis.conf.dir>/etc/linkis-conf</linkis.conf.dir>
                 <linkis.log.dir>/var/logs/linkis</linkis.log.dir>
+                <ldh.hadoop.version>2.7.2</ldh.hadoop.version>
+                <ldh.hive.version>2.3.3</ldh.hive.version>
+                <ldh.spark.version>2.4.3</ldh.spark.version>
+                <ldh.spark.hadoop.version>2.7</ldh.spark.hadoop.version>
+                <ldh.flink.version>1.12.2</ldh.flink.version>
+                <ldh.zookeeper.version>3.5.9</ldh.zookeeper.version>
             </properties>
             <build>
                 <plugins>
@@ -249,7 +255,7 @@
                                     <target name="linkis-image">
                                         <exec executable="docker" failonerror="true" >
                                             <arg value="build" />
-                                            <arg value="-f"          /> <arg value="${project.basedir}/docker/Dockerfile" />
+                                            <arg value="-f"          /> <arg value="${project.basedir}/docker/linkis.Dockerfile" />
                                             <arg value="-t"          /> <arg value="${project.parent.artifactId}:${project.version}" />
                                             <arg value="--target"    /> <arg value="linkis" />
                                             <arg value="--build-arg" /> <arg value="IMAGE_BASE=${image.base}" />
@@ -283,7 +289,7 @@
                                         </copy>
                                         <exec executable="docker" failonerror="true" >
                                             <arg value="build" />
-                                            <arg value="-f"          /> <arg value="${project.basedir}/docker/Dockerfile" />
+                                            <arg value="-f"          /> <arg value="${project.basedir}/docker/linkis.Dockerfile" />
                                             <arg value="-t"          /> <arg value="${project.parent.artifactId}-web:${project.version}" />
                                             <arg value="--target"    /> <arg value="linkis-web" />
                                             <arg value="--build-arg" /> <arg value="IMAGE_BASE_WEB=${image.base.web}" />
@@ -294,6 +300,45 @@
                                     </target>
                                 </configuration>
                             </execution>
+                            <execution>
+                                <id>build-linkis-ldh-image</id>
+                                <phase>install</phase>
+                                <goals>
+                                    <goal>run</goal>
+                                </goals>
+                                <configuration>
+                                    <target name="linkis-ldh-image" if="linkis.build.ldh">
+                                        <echo message="Building linkis ldh (hadoop all in one) image ..." />
+                                        <exec executable="sh" failonerror="true" >
+                                            <arg value="${basedir}/docker/scripts/prepare-ldh-image.sh" />
+                                        </exec>
+                                        <exec executable="docker" failonerror="true" >
+                                            <arg value="build" />
+                                            <arg value="-f"          /> <arg value="${project.basedir}/docker/ldh.Dockerfile" />
+                                            <arg value="-t"          /> <arg value="${project.parent.artifactId}-ldh:${project.version}" />
+                                            <arg value="--target"    /> <arg value="linkis-ldh" />
+                                            <arg value="--build-arg" /> <arg value="IMAGE_BASE=${image.base}" />
+                                            <arg value="--build-arg" /> <arg value="JDK_VERSION=${jdk.version}" />
+                                            <arg value="--build-arg" /> <arg value="JDK_BUILD_REVISION=${jdk.build.revision}" />
+                                            <arg value="--build-arg" /> <arg value="LINKIS_VERSION=${project.version}" />
+                                            <arg value="--build-arg" /> <arg value="MYSQL_JDBC_VERSION=${mysql.connector.version}" />
+                                            <arg value="--build-arg" /> <arg value="BUILD_TYPE=${image.build.type}" />
+                                            <arg value="--build-arg" /> <arg value="LINKIS_SYSTEM_USER=${linkis.system.user}" />
+                                            <arg value="--build-arg" /> <arg value="LINKIS_SYSTEM_UID=${linkis.system.uid}" />
+                                            <arg value="--build-arg" /> <arg value="LINKIS_HOME=${linkis.home}" />
+                                            <arg value="--build-arg" /> <arg value="LINKIS_CONF_DIR=${linkis.conf.dir}" />
+                                            <arg value="--build-arg" /> <arg value="LINKIS_LOG_DIR=${linkis.log.dir}" />
+                                            <arg value="--build-arg" /> <arg value="HADOOP_VERSION=${ldh.hadoop.version}" />
+                                            <arg value="--build-arg" /> <arg value="HIVE_VERSION=${ldh.hive.version}" />
+                                            <arg value="--build-arg" /> <arg value="SPARK_VERSION=${ldh.spark.version}" />
+                                            <arg value="--build-arg" /> <arg value="SPARK_HADOOP_VERSION=${ldh.spark.hadoop.version}" />
+                                            <arg value="--build-arg" /> <arg value="ZOOKEEPER_VERSION=${ldh.zookeeper.version}" />
+                                            <arg value="--build-arg" /> <arg value="FLINK_VERSION=${ldh.flink.version}" />
+                                            <arg value="${project.build.directory}" />
+                                        </exec>
+                                    </target>
+                                </configuration>
+                            </execution>
                         </executions>
                     </plugin>
                 </plugins>

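[editor's note] The build-linkis-ldh-image execution above only runs when the linkis.build.ldh property is set, and it drives prepare-ldh-image.sh followed by a docker build against ldh.Dockerfile. For reference, a rough manual equivalent under the default component versions added in this commit; the image tag and build-context path are placeholders, and the remaining --build-arg values (JDK, Linkis user/paths, MySQL JDBC version) are omitted for brevity:

    # stage the component tarballs expected by ldh.Dockerfile
    sh linkis-dist/docker/scripts/prepare-ldh-image.sh
    # build the all-in-one LDH image (tag and context path are illustrative)
    docker build \
        -f linkis-dist/docker/ldh.Dockerfile \
        --target linkis-ldh \
        -t linkis-ldh:dev \
        --build-arg HADOOP_VERSION=2.7.2 \
        --build-arg HIVE_VERSION=2.3.3 \
        --build-arg SPARK_VERSION=2.4.3 \
        --build-arg SPARK_HADOOP_VERSION=2.7 \
        --build-arg FLINK_VERSION=1.12.2 \
        --build-arg ZOOKEEPER_VERSION=3.5.9 \
        linkis-dist/target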

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@linkis.apache.org
For additional commands, e-mail: commits-help@linkis.apache.org