You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@kylin.apache.org by xx...@apache.org on 2020/05/05 17:35:31 UTC

[kylin] branch master updated (b53ba2d -> 50c374b)

This is an automated email from the ASF dual-hosted git repository.

xxyu pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/kylin.git.


    from b53ba2d  KYLIN-4470 The user cannot log in kylin normally after being assigned to a group
     new 0fdf776  KYLIN-4181 Schedule Kylin using Kubernetes
     new f78c2f1  KYLIN-4447 Kylin on kubernetes in a production env
     new 23aad89  KYLIN-4447 Refactor and merge two related commits
     new 50c374b  KYLIN-4447 Upload a complete example for CDH5.7 env(2 job + 2 query)

The 4 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .gitignore                                         |   3 +-
 kubernetes/README.md                               |  37 ++
 kubernetes/config/README.md                        |   3 +
 kubernetes/config/production/filebeat/filebeat.yml | 125 ++++
 kubernetes/config/production/hadoop/core-site.xml  |  19 +
 kubernetes/config/production/hadoop/hbase-site.xml |  19 +
 kubernetes/config/production/hadoop/hdfs-site.xml  |  19 +
 kubernetes/config/production/hadoop/hive-site.xml  |  19 +
 .../config/production/hadoop/mapred-site.xml       |  19 +
 kubernetes/config/production/hadoop/yarn-site.xml  |  19 +
 .../production/kylin-job}/kylin-kafka-consumer.xml |   0
 .../kylin-job/kylin-server-log4j.properties        |  30 +
 .../kylin-job}/kylin-spark-log4j.properties        |   0
 .../kylin-job/kylin-tools-log4j.properties         |  38 ++
 .../config/production/kylin-job/kylin.properties   | 420 ++++++++++++++
 .../production/kylin-job}/kylin_hive_conf.xml      |   0
 .../production/kylin-job}/kylin_job_conf.xml       |   0
 .../kylin-job}/kylin_job_conf_cube_merge.xml       |   0
 .../production/kylin-job}/kylin_job_conf_inmem.xml |   0
 .../config/production/kylin-job}/setenv-tool.sh    |   0
 kubernetes/config/production/kylin-job/setenv.sh   |  73 +++
 .../production/kylin-more/applicationContext.xml   | 125 ++++
 .../config/production/kylin-more}/ehcache-test.xml |   0
 .../config/production/kylin-more}/ehcache.xml      |   0
 .../config/production/kylin-more}/kylinMetrics.xml |   0
 .../config/production/kylin-more/kylinSecurity.xml | 634 +++++++++++++++++++++
 .../kylin-query}/kylin-kafka-consumer.xml          |   0
 .../kylin-query/kylin-server-log4j.properties      |  30 +
 .../kylin-query}/kylin-spark-log4j.properties      |   0
 .../kylin-query/kylin-tools-log4j.properties       |  38 ++
 .../config/production/kylin-query/kylin.properties | 420 ++++++++++++++
 .../production/kylin-query}/kylin_hive_conf.xml    |   0
 .../production/kylin-query}/kylin_job_conf.xml     |   0
 .../kylin-query}/kylin_job_conf_cube_merge.xml     |   0
 .../kylin-query}/kylin_job_conf_inmem.xml          |   0
 .../config/production/kylin-query}/setenv-tool.sh  |   0
 kubernetes/config/production/kylin-query/setenv.sh |  73 +++
 .../production/streaming-receiver/kylin.properties | 413 ++++++++++++++
 .../config/production/streaming-receiver/setenv.sh |  73 +++
 kubernetes/config/production/tomcat/context.xml    |  48 ++
 kubernetes/config/production/tomcat/server.xml     | 142 +++++
 kubernetes/config/quickstart/hadoop/core-site.xml  |  19 +
 kubernetes/config/quickstart/hadoop/hbase-site.xml |  19 +
 kubernetes/config/quickstart/hadoop/hdfs-site.xml  |  19 +
 kubernetes/config/quickstart/hadoop/hive-site.xml  |  19 +
 .../config/quickstart/hadoop/mapred-site.xml       |  19 +
 kubernetes/config/quickstart/hadoop/yarn-site.xml  |  19 +
 .../quickstart/kylin}/kylin-kafka-consumer.xml     |   0
 .../quickstart/kylin/kylin-server-log4j.properties |  30 +
 .../quickstart/kylin}/kylin-spark-log4j.properties |   0
 .../quickstart/kylin/kylin-tools-log4j.properties  |  38 ++
 .../config/quickstart/kylin/kylin.properties       | 413 ++++++++++++++
 .../config/quickstart/kylin}/kylin_hive_conf.xml   |   0
 .../config/quickstart/kylin}/kylin_job_conf.xml    |   0
 .../kylin}/kylin_job_conf_cube_merge.xml           |   0
 .../quickstart/kylin}/kylin_job_conf_inmem.xml     |   0
 .../config/quickstart/kylin}/setenv-tool.sh        |   0
 kubernetes/config/quickstart/kylin/setenv.sh       |  73 +++
 kubernetes/docker/README.md                        |  10 +
 kubernetes/docker/hadoop-client/CDH57/Dockerfile   |  66 +++
 .../docker/hadoop-client/CDH57/build-image.sh      |  17 +
 kubernetes/docker/hadoop-client/README.md          |  24 +
 .../hadoop-client/apache-hadoop2.7/Dockerfile      |  80 +++
 .../hadoop-client/apache-hadoop2.7/build-image.sh  |  17 +
 kubernetes/docker/kylin-client/Dockerfile          |  82 +++
 kubernetes/docker/kylin-client/README.md           |   9 +
 kubernetes/docker/kylin-client/bin/bootstrap.sh    |  34 ++
 .../docker/kylin-client/bin/check-liveness.sh      |  18 +
 .../docker/kylin-client/bin/check-readiness.sh     |  18 +
 kubernetes/docker/kylin-client/bin/clean-log.sh    |  40 ++
 kubernetes/docker/kylin-client/build-image.sh      |  20 +
 kubernetes/docker/kylin-client/crontab.txt         |   1 +
 kubernetes/template/production/cleanup.sh          |  21 +
 kubernetes/template/production/deploy-kylin.sh     | 106 ++++
 .../deployment/kylin/kylin-job-statefulset.yaml    | 124 ++++
 .../deployment/kylin/kylin-query-statefulset.yaml  | 118 ++++
 .../production/deployment/kylin/kylin-service.yaml |  34 ++
 .../deployment/memcached/memcached-service.yaml    |  29 +
 .../memcached/memcached-statefulset.yaml           |  60 ++
 .../streaming/kylin-receiver-statefulset.yaml      |  83 +++
 kubernetes/template/production/example/README.md   | 173 ++++++
 .../example/config/filebeat/filebeat.yml           | 125 ++++
 .../production/example/config/hadoop/core-site.xml | 147 +++++
 .../example/config/hadoop/hbase-site.xml           | 123 ++++
 .../production/example/config/hadoop/hdfs-site.xml |  83 +++
 .../production/example/config/hadoop/hive-site.xml | 235 ++++++++
 .../example/config/hadoop/mapred-site.xml          | 191 +++++++
 .../production/example/config/hadoop/yarn-site.xml | 135 +++++
 .../config/kylin-job}/kylin-kafka-consumer.xml     |   0
 .../config/kylin-job/kylin-server-log4j.properties |  30 +
 .../config/kylin-job}/kylin-spark-log4j.properties |   0
 .../config/kylin-job/kylin-tools-log4j.properties  |  38 ++
 .../example/config/kylin-job/kylin.properties      | 422 ++++++++++++++
 .../example/config/kylin-job}/kylin_hive_conf.xml  |   0
 .../example/config/kylin-job}/kylin_job_conf.xml   |   0
 .../kylin-job}/kylin_job_conf_cube_merge.xml       |   0
 .../config/kylin-job}/kylin_job_conf_inmem.xml     |   0
 .../example/config/kylin-job}/setenv-tool.sh       |   0
 .../production/example/config/kylin-job/setenv.sh  |  73 +++
 .../config/kylin-more/applicationContext.xml       | 124 ++++
 .../example/config/kylin-more}/ehcache-test.xml    |   0
 .../example/config/kylin-more}/ehcache.xml         |   0
 .../example/config/kylin-more}/kylinMetrics.xml    |   0
 .../example/config/kylin-more/kylinSecurity.xml    | 634 +++++++++++++++++++++
 .../config/kylin-query}/kylin-kafka-consumer.xml   |   0
 .../kylin-query/kylin-server-log4j.properties      |  30 +
 .../kylin-query}/kylin-spark-log4j.properties      |   0
 .../kylin-query/kylin-tools-log4j.properties       |  38 ++
 .../example/config/kylin-query/kylin.properties    | 419 ++++++++++++++
 .../config/kylin-query}/kylin_hive_conf.xml        |   0
 .../example/config/kylin-query}/kylin_job_conf.xml |   0
 .../kylin-query}/kylin_job_conf_cube_merge.xml     |   0
 .../config/kylin-query}/kylin_job_conf_inmem.xml   |   0
 .../example/config/kylin-query/setenv-tool.sh      |  73 +++
 .../example/config/kylin-query/setenv.sh           |  73 +++
 .../production/example/config/tomcat/context.xml   |  48 ++
 .../production/example/config/tomcat/server.xml    | 142 +++++
 .../example/deployment/deploy-sample-cluster.sh    |  93 +++
 .../deployment/kylin-job/kylin-job-service.yaml    |  36 ++
 .../kylin-job/kylin-job-statefulset.yaml           | 130 +++++
 .../kylin-query/kylin-query-statefulset.yaml       | 130 +++++
 .../deployment/memcached/memcached-service.yaml    |  30 +
 .../memcached/memcached-statefulset.yaml           |  59 ++
 kubernetes/template/quickstart/deploy-kylin.sh     |  53 ++
 .../deployment/kylin/kylin-all-statefulset.yaml    |  92 +++
 .../quickstart/deployment/kylin/kylin-service.yaml |  36 ++
 126 files changed, 8531 insertions(+), 2 deletions(-)
 create mode 100644 kubernetes/README.md
 create mode 100644 kubernetes/config/README.md
 create mode 100644 kubernetes/config/production/filebeat/filebeat.yml
 create mode 100644 kubernetes/config/production/hadoop/core-site.xml
 create mode 100644 kubernetes/config/production/hadoop/hbase-site.xml
 create mode 100644 kubernetes/config/production/hadoop/hdfs-site.xml
 create mode 100644 kubernetes/config/production/hadoop/hive-site.xml
 create mode 100644 kubernetes/config/production/hadoop/mapred-site.xml
 create mode 100644 kubernetes/config/production/hadoop/yarn-site.xml
 copy {examples/test_case_data/sandbox => kubernetes/config/production/kylin-job}/kylin-kafka-consumer.xml (100%)
 create mode 100644 kubernetes/config/production/kylin-job/kylin-server-log4j.properties
 copy {build/conf => kubernetes/config/production/kylin-job}/kylin-spark-log4j.properties (100%)
 create mode 100644 kubernetes/config/production/kylin-job/kylin-tools-log4j.properties
 create mode 100644 kubernetes/config/production/kylin-job/kylin.properties
 copy {build/conf => kubernetes/config/production/kylin-job}/kylin_hive_conf.xml (100%)
 copy {build/conf => kubernetes/config/production/kylin-job}/kylin_job_conf.xml (100%)
 copy {build/conf => kubernetes/config/production/kylin-job}/kylin_job_conf_cube_merge.xml (100%)
 copy {build/conf => kubernetes/config/production/kylin-job}/kylin_job_conf_inmem.xml (100%)
 copy {build/conf => kubernetes/config/production/kylin-job}/setenv-tool.sh (100%)
 mode change 100755 => 100644
 create mode 100644 kubernetes/config/production/kylin-job/setenv.sh
 create mode 100644 kubernetes/config/production/kylin-more/applicationContext.xml
 copy {server/src/main/resources => kubernetes/config/production/kylin-more}/ehcache-test.xml (100%)
 copy {server/src/main/resources => kubernetes/config/production/kylin-more}/ehcache.xml (100%)
 copy {server/src/main/resources => kubernetes/config/production/kylin-more}/kylinMetrics.xml (100%)
 create mode 100644 kubernetes/config/production/kylin-more/kylinSecurity.xml
 copy {examples/test_case_data/sandbox => kubernetes/config/production/kylin-query}/kylin-kafka-consumer.xml (100%)
 create mode 100644 kubernetes/config/production/kylin-query/kylin-server-log4j.properties
 copy {build/conf => kubernetes/config/production/kylin-query}/kylin-spark-log4j.properties (100%)
 create mode 100644 kubernetes/config/production/kylin-query/kylin-tools-log4j.properties
 create mode 100644 kubernetes/config/production/kylin-query/kylin.properties
 copy {build/conf => kubernetes/config/production/kylin-query}/kylin_hive_conf.xml (100%)
 copy {build/conf => kubernetes/config/production/kylin-query}/kylin_job_conf.xml (100%)
 copy {build/conf => kubernetes/config/production/kylin-query}/kylin_job_conf_cube_merge.xml (100%)
 copy {build/conf => kubernetes/config/production/kylin-query}/kylin_job_conf_inmem.xml (100%)
 copy {build/conf => kubernetes/config/production/kylin-query}/setenv-tool.sh (100%)
 mode change 100755 => 100644
 create mode 100644 kubernetes/config/production/kylin-query/setenv.sh
 create mode 100644 kubernetes/config/production/streaming-receiver/kylin.properties
 create mode 100644 kubernetes/config/production/streaming-receiver/setenv.sh
 create mode 100644 kubernetes/config/production/tomcat/context.xml
 create mode 100644 kubernetes/config/production/tomcat/server.xml
 create mode 100644 kubernetes/config/quickstart/hadoop/core-site.xml
 create mode 100644 kubernetes/config/quickstart/hadoop/hbase-site.xml
 create mode 100644 kubernetes/config/quickstart/hadoop/hdfs-site.xml
 create mode 100644 kubernetes/config/quickstart/hadoop/hive-site.xml
 create mode 100644 kubernetes/config/quickstart/hadoop/mapred-site.xml
 create mode 100644 kubernetes/config/quickstart/hadoop/yarn-site.xml
 copy {examples/test_case_data/sandbox => kubernetes/config/quickstart/kylin}/kylin-kafka-consumer.xml (100%)
 create mode 100644 kubernetes/config/quickstart/kylin/kylin-server-log4j.properties
 copy {build/conf => kubernetes/config/quickstart/kylin}/kylin-spark-log4j.properties (100%)
 create mode 100644 kubernetes/config/quickstart/kylin/kylin-tools-log4j.properties
 create mode 100644 kubernetes/config/quickstart/kylin/kylin.properties
 copy {build/conf => kubernetes/config/quickstart/kylin}/kylin_hive_conf.xml (100%)
 copy {build/conf => kubernetes/config/quickstart/kylin}/kylin_job_conf.xml (100%)
 copy {build/conf => kubernetes/config/quickstart/kylin}/kylin_job_conf_cube_merge.xml (100%)
 copy {build/conf => kubernetes/config/quickstart/kylin}/kylin_job_conf_inmem.xml (100%)
 copy {build/conf => kubernetes/config/quickstart/kylin}/setenv-tool.sh (100%)
 mode change 100755 => 100644
 create mode 100644 kubernetes/config/quickstart/kylin/setenv.sh
 create mode 100644 kubernetes/docker/README.md
 create mode 100644 kubernetes/docker/hadoop-client/CDH57/Dockerfile
 create mode 100644 kubernetes/docker/hadoop-client/CDH57/build-image.sh
 create mode 100644 kubernetes/docker/hadoop-client/README.md
 create mode 100644 kubernetes/docker/hadoop-client/apache-hadoop2.7/Dockerfile
 create mode 100644 kubernetes/docker/hadoop-client/apache-hadoop2.7/build-image.sh
 create mode 100644 kubernetes/docker/kylin-client/Dockerfile
 create mode 100644 kubernetes/docker/kylin-client/README.md
 create mode 100755 kubernetes/docker/kylin-client/bin/bootstrap.sh
 create mode 100644 kubernetes/docker/kylin-client/bin/check-liveness.sh
 create mode 100644 kubernetes/docker/kylin-client/bin/check-readiness.sh
 create mode 100644 kubernetes/docker/kylin-client/bin/clean-log.sh
 create mode 100644 kubernetes/docker/kylin-client/build-image.sh
 create mode 100644 kubernetes/docker/kylin-client/crontab.txt
 create mode 100644 kubernetes/template/production/cleanup.sh
 create mode 100644 kubernetes/template/production/deploy-kylin.sh
 create mode 100644 kubernetes/template/production/deployment/kylin/kylin-job-statefulset.yaml
 create mode 100644 kubernetes/template/production/deployment/kylin/kylin-query-statefulset.yaml
 create mode 100644 kubernetes/template/production/deployment/kylin/kylin-service.yaml
 create mode 100644 kubernetes/template/production/deployment/memcached/memcached-service.yaml
 create mode 100644 kubernetes/template/production/deployment/memcached/memcached-statefulset.yaml
 create mode 100644 kubernetes/template/production/deployment/streaming/kylin-receiver-statefulset.yaml
 create mode 100644 kubernetes/template/production/example/README.md
 create mode 100644 kubernetes/template/production/example/config/filebeat/filebeat.yml
 create mode 100644 kubernetes/template/production/example/config/hadoop/core-site.xml
 create mode 100644 kubernetes/template/production/example/config/hadoop/hbase-site.xml
 create mode 100644 kubernetes/template/production/example/config/hadoop/hdfs-site.xml
 create mode 100644 kubernetes/template/production/example/config/hadoop/hive-site.xml
 create mode 100644 kubernetes/template/production/example/config/hadoop/mapred-site.xml
 create mode 100644 kubernetes/template/production/example/config/hadoop/yarn-site.xml
 copy {examples/test_case_data/sandbox => kubernetes/template/production/example/config/kylin-job}/kylin-kafka-consumer.xml (100%)
 create mode 100644 kubernetes/template/production/example/config/kylin-job/kylin-server-log4j.properties
 copy {build/conf => kubernetes/template/production/example/config/kylin-job}/kylin-spark-log4j.properties (100%)
 create mode 100644 kubernetes/template/production/example/config/kylin-job/kylin-tools-log4j.properties
 create mode 100644 kubernetes/template/production/example/config/kylin-job/kylin.properties
 copy {build/conf => kubernetes/template/production/example/config/kylin-job}/kylin_hive_conf.xml (100%)
 copy {build/conf => kubernetes/template/production/example/config/kylin-job}/kylin_job_conf.xml (100%)
 copy {build/conf => kubernetes/template/production/example/config/kylin-job}/kylin_job_conf_cube_merge.xml (100%)
 copy {build/conf => kubernetes/template/production/example/config/kylin-job}/kylin_job_conf_inmem.xml (100%)
 copy {build/conf => kubernetes/template/production/example/config/kylin-job}/setenv-tool.sh (100%)
 mode change 100755 => 100644
 create mode 100644 kubernetes/template/production/example/config/kylin-job/setenv.sh
 create mode 100644 kubernetes/template/production/example/config/kylin-more/applicationContext.xml
 copy {server/src/main/resources => kubernetes/template/production/example/config/kylin-more}/ehcache-test.xml (100%)
 copy {server/src/main/resources => kubernetes/template/production/example/config/kylin-more}/ehcache.xml (100%)
 copy {server/src/main/resources => kubernetes/template/production/example/config/kylin-more}/kylinMetrics.xml (100%)
 create mode 100644 kubernetes/template/production/example/config/kylin-more/kylinSecurity.xml
 copy {examples/test_case_data/sandbox => kubernetes/template/production/example/config/kylin-query}/kylin-kafka-consumer.xml (100%)
 create mode 100644 kubernetes/template/production/example/config/kylin-query/kylin-server-log4j.properties
 copy {build/conf => kubernetes/template/production/example/config/kylin-query}/kylin-spark-log4j.properties (100%)
 create mode 100644 kubernetes/template/production/example/config/kylin-query/kylin-tools-log4j.properties
 create mode 100644 kubernetes/template/production/example/config/kylin-query/kylin.properties
 copy {build/conf => kubernetes/template/production/example/config/kylin-query}/kylin_hive_conf.xml (100%)
 copy {build/conf => kubernetes/template/production/example/config/kylin-query}/kylin_job_conf.xml (100%)
 copy {build/conf => kubernetes/template/production/example/config/kylin-query}/kylin_job_conf_cube_merge.xml (100%)
 copy {build/conf => kubernetes/template/production/example/config/kylin-query}/kylin_job_conf_inmem.xml (100%)
 create mode 100644 kubernetes/template/production/example/config/kylin-query/setenv-tool.sh
 create mode 100644 kubernetes/template/production/example/config/kylin-query/setenv.sh
 create mode 100644 kubernetes/template/production/example/config/tomcat/context.xml
 create mode 100644 kubernetes/template/production/example/config/tomcat/server.xml
 create mode 100644 kubernetes/template/production/example/deployment/deploy-sample-cluster.sh
 create mode 100644 kubernetes/template/production/example/deployment/kylin-job/kylin-job-service.yaml
 create mode 100644 kubernetes/template/production/example/deployment/kylin-job/kylin-job-statefulset.yaml
 create mode 100644 kubernetes/template/production/example/deployment/kylin-query/kylin-query-statefulset.yaml
 create mode 100644 kubernetes/template/production/example/deployment/memcached/memcached-service.yaml
 create mode 100644 kubernetes/template/production/example/deployment/memcached/memcached-statefulset.yaml
 create mode 100644 kubernetes/template/quickstart/deploy-kylin.sh
 create mode 100644 kubernetes/template/quickstart/deployment/kylin/kylin-all-statefulset.yaml
 create mode 100644 kubernetes/template/quickstart/deployment/kylin/kylin-service.yaml


[kylin] 03/04: KYLIN-4447 Refactor and merge two related commits

Posted by xx...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

xxyu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kylin.git

commit 23aad894a2d74aa47e46f8a0840e964300db7d3a
Author: XiaoxiangYu <hi...@126.com>
AuthorDate: Sun Apr 26 14:32:03 2020 +0800

    KYLIN-4447 Refactor and merge two related commits
---
 k8s/developments/kylin/kylin-all-statefulset.yaml  | 132 -----
 k8s/developments/kylin/kylin-job-statefulset.yaml  | 123 ----
 .../kylin/kylin-query-statefulset.yaml             | 132 -----
 .../kylin/kylin-receiver-statefulset.yaml          |  82 ---
 k8s/images/hadoop-client/README.MD                 |   5 -
 k8s/images/kylin/bin/check-liveness.sh             |  10 -
 kubernetes/Dockerfile                              |  78 ---
 kubernetes/README                                  |   6 +
 kubernetes/README.md                               | 109 ----
 kubernetes/config/README                           |   1 +
 .../production}/filebeat/filebeat.yml              |  18 +-
 kubernetes/config/production/hadoop/core-site.xml  |   4 +
 kubernetes/config/production/hadoop/hbase-site.xml |   4 +
 kubernetes/config/production/hadoop/hdfs-site.xml  |   4 +
 kubernetes/config/production/hadoop/hive-site.xml  |   4 +
 .../config/production/hadoop/mapred-site.xml       |   4 +
 kubernetes/config/production/hadoop/yarn-site.xml  |   4 +
 .../production/kylin-all/kylin-kafka-consumer.xml  |  31 +
 .../kylin-all}/kylin-server-log4j.properties       |   0
 .../kylin-all/kylin-spark-log4j.properties         |  35 +-
 .../kylin-all}/kylin-tools-log4j.properties        |   0
 .../config/production/kylin-all/kylin.properties   | 419 ++++++++++++++
 .../production/kylin-all/kylin_hive_conf.xml       | 102 ++++
 .../config/production/kylin-all/kylin_job_conf.xml |  88 +++
 .../kylin-all/kylin_job_conf_cube_merge.xml        | 104 ++++
 .../production/kylin-all/kylin_job_conf_inmem.xml  | 111 ++++
 .../config/production/kylin-all/setenv-tool.sh     |  73 +++
 kubernetes/config/production/kylin-all/setenv.sh   |  73 +++
 .../production/kylin-job/kylin-kafka-consumer.xml  |  31 +
 .../kylin-job}/kylin-server-log4j.properties       |   0
 .../kylin-job/kylin-spark-log4j.properties         |  35 +-
 .../kylin-job}/kylin-tools-log4j.properties        |   0
 .../config/production/kylin-job/kylin.properties   | 422 ++++++++++++++
 .../production/kylin-job/kylin_hive_conf.xml       | 102 ++++
 .../config/production/kylin-job/kylin_job_conf.xml |  88 +++
 .../kylin-job/kylin_job_conf_cube_merge.xml        | 104 ++++
 .../production/kylin-job/kylin_job_conf_inmem.xml  | 111 ++++
 .../config/production/kylin-job/setenv-tool.sh     |  73 +++
 kubernetes/config/production/kylin-job/setenv.sh   |  73 +++
 .../production/kylin-more/applicationContext.xml   | 124 ++++
 .../config/production/kylin-more/ehcache-test.xml  |  30 +
 .../config/production/kylin-more/ehcache.xml       |  30 +
 .../config/production/kylin-more/kylinMetrics.xml  |  86 +++
 .../config/production/kylin-more/kylinSecurity.xml | 634 +++++++++++++++++++++
 .../kylin-query/kylin-kafka-consumer.xml           |  31 +
 .../kylin-query}/kylin-server-log4j.properties     |   0
 .../kylin-query/kylin-spark-log4j.properties       |  35 +-
 .../kylin-query}/kylin-tools-log4j.properties      |   0
 .../config/production/kylin-query/kylin.properties | 419 ++++++++++++++
 .../production/kylin-query/kylin_hive_conf.xml     | 102 ++++
 .../production/kylin-query/kylin_job_conf.xml      |  88 +++
 .../kylin-query/kylin_job_conf_cube_merge.xml      | 104 ++++
 .../kylin-query/kylin_job_conf_inmem.xml           | 111 ++++
 .../config/production/kylin-query/setenv-tool.sh   |  73 +++
 kubernetes/config/production/kylin-query/setenv.sh |  73 +++
 .../production/streaming-receiver/kylin.properties | 413 ++++++++++++++
 .../config/production/streaming-receiver/setenv.sh |  73 +++
 kubernetes/config/quickstart/hadoop/core-site.xml  |   4 +
 kubernetes/config/quickstart/hadoop/hbase-site.xml |   4 +
 kubernetes/config/quickstart/hadoop/hdfs-site.xml  |   4 +
 kubernetes/config/quickstart/hadoop/hive-site.xml  |   4 +
 .../config/quickstart/hadoop/mapred-site.xml       |   4 +
 kubernetes/config/quickstart/hadoop/yarn-site.xml  |   4 +
 .../quickstart/kylin/kylin-kafka-consumer.xml      |  31 +
 .../kylin/kylin-server-log4j.properties            |   0
 .../quickstart/kylin/kylin-spark-log4j.properties  |  35 +-
 .../quickstart}/kylin/kylin-tools-log4j.properties |   0
 .../config/quickstart/kylin/kylin.properties       | 413 ++++++++++++++
 .../config/quickstart/kylin/kylin_hive_conf.xml    | 102 ++++
 .../config/quickstart/kylin/kylin_job_conf.xml     |  88 +++
 .../quickstart/kylin/kylin_job_conf_cube_merge.xml | 104 ++++
 .../quickstart/kylin/kylin_job_conf_inmem.xml      | 111 ++++
 kubernetes/config/quickstart/kylin/setenv-tool.sh  |  73 +++
 kubernetes/config/quickstart/kylin/setenv.sh       |  73 +++
 kubernetes/docker/README                           |   1 +
 kubernetes/docker/hadoop-client/CDH57/Dockerfile   |  49 ++
 .../docker/hadoop-client/CDH57/build-image.sh      |   1 +
 kubernetes/docker/hadoop-client/README.MD          |   9 +
 .../hadoop-client/apache-hadoop2.7}/Dockerfile     |   0
 .../hadoop-client/apache-hadoop2.7/build-image.sh  |   1 +
 .../docker/kylin-client}/Dockerfile                |  32 +-
 kubernetes/docker/kylin-client/README              |   8 +
 .../docker/kylin-client}/bin/bootstrap.sh          |   5 +-
 .../docker/kylin-client/bin/check-liveness.sh      |   1 +
 .../docker/kylin-client}/bin/check-readiness.sh    |   0
 .../docker/kylin-client}/bin/clean-log.sh          |   0
 kubernetes/docker/kylin-client/build-image.sh      |   2 +
 .../docker/kylin-client}/crontab.txt               |   0
 kubernetes/docker/upload.sh                        |   2 +
 kubernetes/example/README                          |  10 +
 kubernetes/example/config/filebeat/filebeat.yml    |  18 +-
 kubernetes/example/config/hadoop/core-site.xml     | 133 +++++
 kubernetes/example/config/hadoop/hbase-site.xml    | 109 ++++
 kubernetes/example/config/hadoop/hdfs-site.xml     |  69 +++
 kubernetes/example/config/hadoop/hive-site.xml     | 221 +++++++
 kubernetes/example/config/hadoop/mapred-site.xml   | 177 ++++++
 kubernetes/example/config/hadoop/yarn-site.xml     | 121 ++++
 .../config/kylin-job/kylin-kafka-consumer.xml      |  31 +
 .../kylin-job}/kylin-server-log4j.properties       |   0
 .../config/kylin-job/kylin-spark-log4j.properties  |  35 +-
 .../config/kylin-job}/kylin-tools-log4j.properties |   0
 .../example/config/kylin-job/kylin.properties      | 422 ++++++++++++++
 .../example/config/kylin-job/kylin_hive_conf.xml   | 102 ++++
 .../example/config/kylin-job/kylin_job_conf.xml    |  88 +++
 .../config/kylin-job/kylin_job_conf_cube_merge.xml | 104 ++++
 .../config/kylin-job/kylin_job_conf_inmem.xml      | 111 ++++
 kubernetes/example/config/kylin-job/setenv-tool.sh |  73 +++
 kubernetes/example/config/kylin-job/setenv.sh      |  73 +++
 .../config/kylin-more/applicationContext.xml       | 124 ++++
 .../example/config/kylin-more/ehcache-test.xml     |  30 +
 kubernetes/example/config/kylin-more/ehcache.xml   |  30 +
 .../example/config/kylin-more/kylinMetrics.xml     |  86 +++
 .../example/config/kylin-more/kylinSecurity.xml    | 634 +++++++++++++++++++++
 .../config/kylin-query/kylin-kafka-consumer.xml    |  31 +
 .../kylin-query}/kylin-server-log4j.properties     |   0
 .../kylin-query/kylin-spark-log4j.properties       |  35 +-
 .../kylin-query}/kylin-tools-log4j.properties      |   0
 .../example/config/kylin-query/kylin.properties    | 419 ++++++++++++++
 .../example/config/kylin-query/kylin_hive_conf.xml | 102 ++++
 .../example/config/kylin-query/kylin_job_conf.xml  |  88 +++
 .../kylin-query/kylin_job_conf_cube_merge.xml      | 104 ++++
 .../config/kylin-query/kylin_job_conf_inmem.xml    | 111 ++++
 .../example/config/kylin-query/setenv-tool.sh      |  73 +++
 kubernetes/example/config/kylin-query/setenv.sh    |  73 +++
 .../example/deployment/deploy-sample-cluster.sh    | 112 ++++
 .../deployment/kylin-job/kylin-job-service.yaml    |   6 +-
 .../kylin-job/kylin-job-statefulset.yaml           | 124 ++++
 .../kylin-query/kylin-query-service.yaml           |   6 +-
 .../kylin-query/kylin-query-statefulset.yaml       | 105 ++++
 .../deployment}/memcached/memcached-service.yaml   |   3 +-
 .../memcached/memcached-statefulset.yaml           |  21 +-
 kubernetes/kylin-configmap.sh                      |  17 -
 kubernetes/kylin-job-statefulset.yaml              |  95 ---
 kubernetes/kylin-query-statefulset.yaml            |  95 ---
 kubernetes/kylin-secret.sh                         |   3 -
 kubernetes/kylin-service.yaml                      |  44 --
 kubernetes/template/README                         |   0
 kubernetes/template/production/check-deploy.sh     |   9 +
 kubernetes/template/production/cleanup.sh          |   6 +
 kubernetes/template/production/deploy-kylin.sh     | 108 ++++
 .../deployment/kylin/kylin-all-statefulset.yaml    | 129 +++++
 .../deployment/kylin/kylin-job-statefulset.yaml    | 129 +++++
 .../deployment/kylin/kylin-query-statefulset.yaml  | 110 ++++
 .../kylin/kylin-receiver-statefulset.yaml          |  67 +++
 .../deployment}/kylin/kylin-service.yaml           |   3 +-
 .../deployment}/memcached/memcached-service.yaml   |   2 +-
 .../memcached/memcached-statefulset.yaml           |  12 +-
 kubernetes/template/quickstart/check-cluster.sh    |   7 +
 kubernetes/template/quickstart/cleanup.sh          |   2 +
 kubernetes/template/quickstart/deploy-kylin.sh     |  43 ++
 .../deployment/kylin/kylin-all-statefulset.yaml    |  93 +++
 .../deployment}/kylin/kylin-service.yaml           |   2 +-
 152 files changed, 10491 insertions(+), 1085 deletions(-)

diff --git a/k8s/developments/kylin/kylin-all-statefulset.yaml b/k8s/developments/kylin/kylin-all-statefulset.yaml
deleted file mode 100644
index f97256e..0000000
--- a/k8s/developments/kylin/kylin-all-statefulset.yaml
+++ /dev/null
@@ -1,132 +0,0 @@
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: kylin-all
-  namespace: kylin
-spec:
-  serviceName: kylin-svc
-  replicas: 1
-  selector:
-    matchExpressions:
-    - key: app
-      operator: In
-      values:
-      - kylin
-    - key: query
-      operator: In
-      values:
-      - "true"
-    - key: job
-      operator: In
-      values:
-      - "true"
-  template:
-    metadata:
-      labels:
-        app: kylin
-        query: "true"
-        job: "true"
-    spec:
-      containers:
-      - name: kylin
-        image: apachekylin/kylin:{version}
-        command:
-        - sh
-        - -c
-        args:
-        - cp $KYLIN_HOME/tomcat-config/* $KYLIN_HOME/tomcat/conf;
-          cp $KYLIN_HOME/sso-config/* $KYLIN_HOME/tomcat/webapps/kylin/WEB-INF/classes;
-          $TOOL_HOME/bootstrap.sh server -d;
-        ports:
-        - containerPort: 7070
-        - containerPort: 7443
-        livenessProbe:
-          exec:
-            command:
-            - sh
-            - -c
-            - $TOOL_HOME/check-liveness.sh
-          initialDelaySeconds: 120
-          periodSeconds: 60
-        readinessProbe:
-          exec:
-            command:
-            - sh
-            - -c
-            - $TOOL_HOME/check-readiness.sh
-          initialDelaySeconds: 120
-          periodSeconds: 60
-          failureThreshold: 30
-        volumeMounts:
-        - name: kylin-config
-          mountPath: /home/b_kylin/kylin2/conf
-        - name: tomcat-config
-          mountPath: /home/b_kylin/kylin2/tomcat-config
-        - name: kylin-logs
-          mountPath: /home/b_kylin/kylin2/logs
-        - name: tomcat-logs
-          mountPath: /home/b_kylin/kylin2/tomcat/logs
-        - name: sso-config
-          mountPath: /home/b_kylin/kylin2/sso-config
-        resources:
-          requests:
-            memory: 124Gi
-            cpu: 24
-          limits:
-            memory: 124Gi
-            cpu: 32
-      - name: filebeat
-        image: docker.elastic.co/beats/filebeat:6.4.3
-        args:
-        - -c
-        - /usr/share/filebeat/config/filebeat.yml
-        - -e
-        volumeMounts:
-        - name: kylin-logs
-          mountPath: /var/log/kylin
-        - name: tomcat-logs
-          mountPath: /var/log/tomcat
-        - name: filebeat-config
-          mountPath: /usr/share/filebeat/config
-          readOnly: true
-        resources:
-          requests:
-            memory: 4Gi
-            cpu: 2
-          limits:
-            memory: 4Gi
-            cpu: 2
-      volumes:
-      - name: kylin-logs
-        emptyDir:
-          sizeLimit: 30Gi
-      - name: tomcat-logs
-        emptyDir:
-          sizeLimit: 2Gi
-      - name: kylin-config
-        secret:
-          secretName: kylin-config
-          items:
-          - key: kylin.properties
-            path: kylin.properties
-          - key: kylin-tools-log4j.properties
-            path: kylin-tools-log4j.properties
-          - key: kylin_hive_conf.xml
-            path: kylin_hive_conf.xml
-          - key: kylin_job_conf_inmem.xml
-            path: kylin_job_conf_inmem.xml
-          - key: kylin-server-log4j.properties
-            path: kylin-server-log4j.properties
-          - key: kylin_job_conf.xml
-            path: kylin_job_conf.xml
-          - key: setenv.sh
-            path: setenv.sh
-      - name: tomcat-config
-        secret:
-          secretName: tomcat-config
-      - name: sso-config
-        secret:
-          secretName: sso-config
-      - name: filebeat-config
-        secret:
-          secretName: filebeat-config
\ No newline at end of file
diff --git a/k8s/developments/kylin/kylin-job-statefulset.yaml b/k8s/developments/kylin/kylin-job-statefulset.yaml
deleted file mode 100644
index df95084..0000000
--- a/k8s/developments/kylin/kylin-job-statefulset.yaml
+++ /dev/null
@@ -1,123 +0,0 @@
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: kylin-job
-  namespace: kylin
-spec:
-  serviceName: kylin-svc
-  replicas: 1
-  selector:
-    matchExpressions:
-    - key: app
-      operator: In
-      values:
-      - kylin
-    - key: query
-      operator: In
-      values:
-      - "false"
-    - key: job
-      operator: In
-      values:
-      - "true"
-  template:
-    metadata:
-      labels:
-        app: kylin
-        query: "false"
-        job: "true"
-    spec:
-      containers:
-      - name: kylin
-        image: apachekylin/kylin:{version}
-        command:
-        - sh
-        - -c
-        args:
-        - cp $KYLIN_HOME/tomcat-config/* $KYLIN_HOME/tomcat/conf;
-          cp $KYLIN_HOME/sso-config/* $KYLIN_HOME/tomcat/webapps/kylin/WEB-INF/classes;
-          $TOOL_HOME/bootstrap.sh server -d;
-        ports:
-        - containerPort: 7070
-        - containerPort: 7443
-        livenessProbe:
-          exec:
-            command:
-            - sh
-            - -c
-            - $TOOL_HOME/check-liveness.sh
-          initialDelaySeconds: 120
-          periodSeconds: 60
-        volumeMounts:
-        - name: kylin-config
-          mountPath: /home/b_kylin/kylin2/conf
-        - name: tomcat-config
-          mountPath: /home/b_kylin/kylin2/tomcat-config
-        - name: kylin-logs
-          mountPath: /home/b_kylin/kylin2/logs
-        - name: tomcat-logs
-          mountPath: /home/b_kylin/kylin2/tomcat/logs
-        - name: sso-config
-          mountPath: /home/b_kylin/kylin2/sso-config
-        resources:
-          requests:
-            memory: 124Gi
-            cpu: 24
-          limits:
-            memory: 124Gi
-            cpu: 32
-      - name: filebeat
-        image: docker.elastic.co/beats/filebeat:6.4.3
-        args:
-        - -c
-        - /usr/share/filebeat/config/filebeat.yml
-        - -e
-        volumeMounts:
-        - name: kylin-logs
-          mountPath: /var/log/kylin
-        - name: tomcat-logs
-          mountPath: /var/log/tomcat
-        - name: filebeat-config
-          mountPath: /usr/share/filebeat/config
-          readOnly: true
-        resources:
-          requests:
-            memory: 4Gi
-            cpu: 2
-          limits:
-            memory: 4Gi
-            cpu: 2
-      volumes:
-      - name: kylin-logs
-        emptyDir:
-          sizeLimit: 30Gi
-      - name: tomcat-logs
-        emptyDir:
-          sizeLimit: 2Gi
-      - name: kylin-config
-        secret:
-          secretName: kylin-config
-          items:
-          - key: kylin-job.properties
-            path: kylin.properties
-          - key: kylin-tools-log4j.properties
-            path: kylin-tools-log4j.properties
-          - key: kylin_hive_conf.xml
-            path: kylin_hive_conf.xml
-          - key: kylin_job_conf_inmem.xml
-            path: kylin_job_conf_inmem.xml
-          - key: kylin-server-log4j.properties
-            path: kylin-server-log4j.properties
-          - key: kylin_job_conf.xml
-            path: kylin_job_conf.xml
-          - key: setenv.sh
-            path: setenv.sh
-      - name: tomcat-config
-        secret:
-          secretName: tomcat-config
-      - name: sso-config
-        secret:
-          secretName: sso-config
-      - name: filebeat-config
-        secret:
-          secretName: filebeat-config
\ No newline at end of file
diff --git a/k8s/developments/kylin/kylin-query-statefulset.yaml b/k8s/developments/kylin/kylin-query-statefulset.yaml
deleted file mode 100644
index fa1f3c0..0000000
--- a/k8s/developments/kylin/kylin-query-statefulset.yaml
+++ /dev/null
@@ -1,132 +0,0 @@
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: kylin-query
-  namespace: kylin
-spec:
-  serviceName: kylin-svc
-  replicas: 1
-  selector:
-    matchExpressions:
-    - key: app
-      operator: In
-      values:
-      - kylin
-    - key: query
-      operator: In
-      values:
-      - "true"
-    - key: job
-      operator: In
-      values:
-      - "false"
-  template:
-    metadata:
-      labels:
-        app: kylin
-        query: "true"
-        job: "false"
-    spec:
-      containers:
-      - name: kylin
-        image: apachekylin/kylin:{version}
-        command:
-        - sh
-        - -c
-        args:
-        - cp $KYLIN_HOME/tomcat-config/* $KYLIN_HOME/tomcat/conf;
-          cp $KYLIN_HOME/sso-config/* $KYLIN_HOME/tomcat/webapps/kylin/WEB-INF/classes;
-          $TOOL_HOME/bootstrap.sh server -d;
-        ports:
-        - containerPort: 7070
-        - containerPort: 7443
-        livenessProbe:
-          exec:
-            command:
-            - sh
-            - -c
-            - $TOOL_HOME/check-liveness.sh
-          initialDelaySeconds: 120
-          periodSeconds: 60
-        readinessProbe:
-          exec:
-            command:
-            - sh
-            - -c
-            - $TOOL_HOME/check-readiness.sh
-          initialDelaySeconds: 120
-          periodSeconds: 60
-          failureThreshold: 30
-        volumeMounts:
-        - name: kylin-config
-          mountPath: /home/b_kylin/kylin2/conf
-        - name: tomcat-config
-          mountPath: /home/b_kylin/kylin2/tomcat-config
-        - name: kylin-logs
-          mountPath: /home/b_kylin/kylin2/logs
-        - name: tomcat-logs
-          mountPath: /home/b_kylin/kylin2/tomcat/logs
-        - name: sso-config
-          mountPath: /home/b_kylin/kylin2/sso-config
-        resources:
-          requests:
-            memory: 124Gi
-            cpu: 24
-          limits:
-            memory: 124Gi
-            cpu: 32
-      - name: filebeat
-        image: docker.elastic.co/beats/filebeat:6.4.3
-        args:
-        - -c
-        - /usr/share/filebeat/config/filebeat.yml
-        - -e
-        volumeMounts:
-        - name: kylin-logs
-          mountPath: /var/log/kylin
-        - name: tomcat-logs
-          mountPath: /var/log/tomcat
-        - name: filebeat-config
-          mountPath: /usr/share/filebeat/config
-          readOnly: true
-        resources:
-          requests:
-            memory: 4Gi
-            cpu: 2
-          limits:
-            memory: 4Gi
-            cpu: 2
-      volumes:
-      - name: kylin-logs
-        emptyDir:
-          sizeLimit: 30Gi
-      - name: tomcat-logs
-        emptyDir:
-          sizeLimit: 2Gi
-      - name: kylin-config
-        secret:
-          secretName: kylin-config
-          items:
-          - key: kylin-query.properties
-            path: kylin.properties
-          - key: kylin-tools-log4j.properties
-            path: kylin-tools-log4j.properties
-          - key: kylin_hive_conf.xml
-            path: kylin_hive_conf.xml
-          - key: kylin_job_conf_inmem.xml
-            path: kylin_job_conf_inmem.xml
-          - key: kylin-server-log4j.properties
-            path: kylin-server-log4j.properties
-          - key: kylin_job_conf.xml
-            path: kylin_job_conf.xml
-          - key: setenv.sh
-            path: setenv.sh
-      - name: tomcat-config
-        secret:
-          secretName: tomcat-config
-      - name: sso-config
-        secret:
-          secretName: sso-config
-      - name: filebeat-config
-        secret:
-          secretName: filebeat-config
\ No newline at end of file
diff --git a/k8s/developments/kylin/kylin-receiver-statefulset.yaml b/k8s/developments/kylin/kylin-receiver-statefulset.yaml
deleted file mode 100644
index 4c35b45..0000000
--- a/k8s/developments/kylin/kylin-receiver-statefulset.yaml
+++ /dev/null
@@ -1,82 +0,0 @@
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: kylin-receiver
-  namespace: kylin
-spec:
-  serviceName: receiver-svc
-  replicas: 10
-  selector:
-    matchExpressions:
-    - key: app
-      operator: In
-      values:
-        - kylin-receiver
-  template:
-    metadata:
-      labels:
-        app: kylin-receiver
-    spec:
-      securityContext:
-        fsGroup: 996
-      containers:
-      - name: kylin
-        image: apachekylin/kylin:{version}
-        command:
-        - sh
-        - -c
-        args:
-        - $TOOL_HOME/bootstrap.sh streaming -d
-        ports:
-        - containerPort: 9090
-        livenessProbe:
-          httpGet:
-            path: /kylin/api/stats/healthCheck
-            port: 9090
-          initialDelaySeconds: 120
-          periodSeconds: 60
-        volumeMounts:
-        - name: kylin-config
-          mountPath: /home/b_kylin/kylin2/conf
-        - name: receiver-volume
-          mountPath: /home/b_kylin/kylin2/stream_index
-        resources:
-          requests:
-            memory: 86Gi
-            cpu: 16
-          limits:
-            memory: 86Gi
-            cpu: 16
-      volumes:
-      - name: kylin-config
-        secret:
-          secretName: kylin-config
-          items:
-          - key: kylin-receiver.properties
-            path: kylin.properties
-          - key: kylin-tools-log4j.properties
-            path: kylin-tools-log4j.properties
-          - key: kylin_hive_conf.xml
-            path: kylin_hive_conf.xml
-          - key: kylin_job_conf_inmem.xml
-            path: kylin_job_conf_inmem.xml
-          - key: kylin-server-log4j.properties
-            path: kylin-server-log4j.properties
-          - key: kylin_job_conf.xml
-            path: kylin_job_conf.xml
-          - key: kylin-receiver-setenv.sh
-            path: setenv.sh
-          - key: mystore.jks
-            path: mystore.jks
-          - key: truststore.jks
-            path: truststore.jks
-  volumeClaimTemplates:
-  - metadata:
-      name: receiver-volume
-    spec:
-      accessModes: 
-        - ReadWriteOnce
-      resources:
-        requests:
-          storage: 400Gi
-      storageClassName: local-dynamic
\ No newline at end of file
diff --git a/k8s/images/hadoop-client/README.MD b/k8s/images/hadoop-client/README.MD
deleted file mode 100644
index 6cb56e8..0000000
--- a/k8s/images/hadoop-client/README.MD
+++ /dev/null
@@ -1,5 +0,0 @@
-Build Step
-1. Please download/copy your hadoop client to hadoop-client folder, which includes hadoop, hive, hbase, spark and zookeeper.
-2. Please copy your hadoop config to config folder, which includes hadoop, hive, hbase and krb5.
-2. Update related version variables in Dockerfile.
-3. Run docker build -t "apachekylin/kylin-hdp-client:${VERSION}"
\ No newline at end of file
diff --git a/k8s/images/kylin/bin/check-liveness.sh b/k8s/images/kylin/bin/check-liveness.sh
deleted file mode 100644
index 6aa0df1..0000000
--- a/k8s/images/kylin/bin/check-liveness.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-result="$(curl --write-out %{http_code} --silent --output /dev/null http://127.0.0.1:7070/kylin/)"
-if [ $result == 200 ]
-then
-  echo "check http get successful"
-  exit 0
-else
-  echo "check http get failed"
-  exit 1
-fi
diff --git a/kubernetes/Dockerfile b/kubernetes/Dockerfile
deleted file mode 100644
index 6454417..0000000
--- a/kubernetes/Dockerfile
+++ /dev/null
@@ -1,78 +0,0 @@
-FROM centos:6.9
-
-ARG APACHE_MIRRORS=http://mirrors.aliyun.com
-ENV APACHE_MIRRORS  ${APACHE_MIRRORS}
-
-ENV JAVA_VERSION    1.8.0
-ENV SPARK_VERSION   2.3.4
-ENV KAFKA_VERSION   2.1.1
-ENV KYLIN_VERSION   3.0.0
-
-ENV JAVA_HOME       /usr/lib/jvm/java-${JAVA_VERSION}
-ENV HADOOP_HOME     /usr/lib/hadoop
-ENV HIVE_HOME       /usr/lib/hive
-ENV HCAT_HOME       /usr/lib/hive-hcatalog
-ENV HBASE_HOME      /usr/lib/hbase
-ENV SPARK_HOME      /opt/spark-${SPARK_VERSION}-bin-hadoop2.6
-ENV KAFKA_HOME      /opt/kafka_2.11-${KAFKA_VERSION}
-ENV KYLIN_HOME      /opt/apache-kylin-${KYLIN_VERSION}-bin-cdh57
-
-ENV PATH $PATH:\
-$SPARK_HOME/bin:\
-$KAFKA_HOME/bin:\
-$KYLIN_HOME/bin
-
-ENV HADOOP_CONF_DIR  /etc/hadoop/conf
-ENV HIVE_CONF_DIR    /etc/hive/conf
-ENV HBASE_CONF_DIR   /etc/hbase/conf
-ENV HIVE_CONF        ${HIVE_CONF_DIR}
-ENV HIVE_LIB         ${HIVE_HOME}/lib
-
-RUN echo $'[cloudera-cdh5] \n\
-# Packages for Cloudera\'s Distribution for Hadoop, Version 5, on RedHat or CentOS 6 x86_64 \n\
-name=Cloudera\'s Distribution for Hadoop, Version 5 \n\
-baseurl=https://archive.cloudera.com/cdh5/redhat/6/x86_64/cdh/5.7.6/ \n\
-gpgkey =https://archive.cloudera.com/cdh5/redhat/6/x86_64/cdh/RPM-GPG-KEY-cloudera \n\
-gpgcheck = 1' > /etc/yum.repos.d/cloudera-cdh5.repo
-
-WORKDIR /opt
-
-# Download Kafka from APACHE_MIRRORS
-RUN set -xeu && \
-    curl -o kafka_2.11-${KAFKA_VERSION}.tgz \
-    ${APACHE_MIRRORS}/apache/kafka/${KAFKA_VERSION}/kafka_2.11-${KAFKA_VERSION}.tgz && \
-    tar -zxf kafka_2.11-${KAFKA_VERSION}.tgz && rm kafka_2.11-${KAFKA_VERSION}.tgz
-
-# Download Spark from APACHE_MIRRORS
-RUN set -xeu && \
-    curl -o spark-${SPARK_VERSION}-bin-hadoop2.6.tgz \
-    ${APACHE_MIRRORS}/apache/spark/spark-${SPARK_VERSION}/spark-${SPARK_VERSION}-bin-hadoop2.6.tgz && \
-    tar -zxf spark-${SPARK_VERSION}-bin-hadoop2.6.tgz && rm spark-${SPARK_VERSION}-bin-hadoop2.6.tgz
-
-# Download Kylin from APACHE_MIRRORS
-RUN set -xeu && \
-    curl -o apache-kylin-${KYLIN_VERSION}-bin-cdh57.tar.gz \
-    ${APACHE_MIRRORS}/apache/kylin/apache-kylin-${KYLIN_VERSION}/apache-kylin-${KYLIN_VERSION}-bin-cdh57.tar.gz && \
-    tar -zxf apache-kylin-${KYLIN_VERSION}-bin-cdh57.tar.gz && rm apache-kylin-${KYLIN_VERSION}-bin-cdh57.tar.gz
-
-# Setup Hadoop & Hive & HBase using CDH Repository. PS: The libhadoop.so provided by CDH is complied with snappy
-RUN set -xeu && \
-    yum -y -q install java-1.8.0-openjdk-devel && \
-    yum -y -q install krb5-workstation && \
-    yum -y -q install hadoop-client && \
-    yum -y -q install hive hive-hcatalog && \
-    yum -y -q install hbase && \
-    curl -o ${HIVE_HOME}/lib/hadoop-lzo-0.4.15.jar \
-    https://clojars.org/repo/hadoop-lzo/hadoop-lzo/0.4.15/hadoop-lzo-0.4.15.jar && \
-    curl -o ${HIVE_HOME}/lib/mysql-connector-java-5.1.24.jar \
-    https://repo1.maven.org/maven2/mysql/mysql-connector-java/5.1.24/mysql-connector-java-5.1.24.jar && \
-    yum -q clean all && \
-    rm -rf /var/cache/yum && \
-    rm -rf /tmp/* /var/tmp/* && \
-    groupadd kylin --gid 1000 && \
-    useradd kylin --uid 1000 --gid 1000 && \
-    chown -R "kylin:kylin" ${KYLIN_HOME}
-
-EXPOSE 7070
-USER kylin:kylin
-CMD ${KYLIN_HOME}/bin/kylin.sh run
\ No newline at end of file
diff --git a/kubernetes/README b/kubernetes/README
new file mode 100644
index 0000000..c29e018
--- /dev/null
+++ b/kubernetes/README
@@ -0,0 +1,6 @@
+## Steps
+
+1. Build **hadoop-client** docker image.
+2. Build **kylin-client** docker image.
+3. Go to `config` dir, prepare configuration file for Kylin and Hadoop.
+4. Go to `template` dir,  modify system resources entry and deploy kylin service.
diff --git a/kubernetes/README.md b/kubernetes/README.md
deleted file mode 100644
index 205e3f3..0000000
--- a/kubernetes/README.md
+++ /dev/null
@@ -1,109 +0,0 @@
-# Kubernetes QuickStart
-
-This guide shows how to run Kylin cluster using Kubernetes StatefulSet Controller. The following figure depicts a typical scenario for Kylin cluster mode deployment:
-
-![image_name](http://kylin.apache.org/images/install/kylin_server_modes.png)
-
-## Build or Pull Docker Image
-
-You can pull the image from Docker Hub directly if you do not want to build the image locally:
-
-```bash
-docker pull apachekylin/apache-kylin:3.0.0-cdh57
-```
-
-TIPS: If you are woking with air-gapped network or slow internet speeds, we suggest you prepare the binary packages by yourself and execute this:
-
-```bash
-docker build -t "apache-kylin:${KYLIN_VERSION}-cdh57" --build-arg APACHE_MIRRORS=http://127.0.0.1:8000 .
-```
-
-## Prepare your Hadoop Configuration
-
-Put all of the configuration files under the "conf" directory.
-
-```bash
-kylin.properties
-applicationContext.xml  # If you need to set cacheManager to Memcached
-hbase-site.xml
-hive-site.xml
-hdfs-site.xml
-core-site.xml
-mapred-site.xml
-yarn-site.xml
-```
-
-If you worked with Kerberized Hadoop Cluster, do not forget to prepare the following files:
-
-```bash
-krb5.conf
-kylin.keytab
-```
-
-## Create ConfigMaps and Secret
-
-We recommand you to create separate Kubernetes namespace for Kylin.
-
-```bash
-kubectl create namespace kylin
-```
-
-Execute the following shell scripts to create the required ConfigMaps:
-
-```bash
-./kylin-configmap.sh
-./kylin-secret.sh
-```
-
-## Create Service and StatefulSet
-
-Make sure the following resources exist in your namespace:
-
-```bash
-kubectl get configmaps,secret -n kylin
-
-NAME                      DATA   AGE
-configmap/hadoop-config   4      89d
-configmap/hbase-config    1      89d
-configmap/hive-config     1      89d
-configmap/krb5-config     1      89d
-configmap/kylin-config    1      89d
-configmap/kylin-context   1      45d
-
-NAME                         TYPE                                  DATA   AGE
-secret/kylin-keytab          Opaque                                1      89d
-
-```
-
-Then, you need to create headless service for stable DNS entries(kylin-0.kylin, kylin-1.kylin, kylin-2.kylin...) of StatefulSet members.
-
-```bash
-kubectl apply -f kylin-service.yaml
-```
-
-Finally, create the StatefulSet and try to use it:
-
-```bash
-kubectl apply -f kylin-job-statefulset.yaml
-kubectl apply -f kylin-query-statefulset.yaml
-```
-
-If everything goes smoothly, you should see all 3 Pods become Running:
-
-```bash
-kubectl get statefulset,pod,service -n kylin
-
-NAME                           READY   AGE
-statefulset.apps/kylin-job     1/1     36d
-statefulset.apps/kylin-query   3/3     36d
-
-NAME                READY   STATUS    RESTARTS   AGE
-pod/kylin-job-0     1/1     Running   0          13m
-pod/kylin-query-0   1/1     Running   0          40h
-pod/kylin-query-1   1/1     Running   0          40h
-
-NAME                  TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
-service/kylin         ClusterIP   None             <none>        7070/TCP   58d
-service/kylin-job     ClusterIP   xx.xxx.xx.xx     <none>        7070/TCP   89d
-service/kylin-query   ClusterIP   xx.xxx.xxx.xxx   <none>        7070/TCP   89d
-```
diff --git a/kubernetes/config/README b/kubernetes/config/README
new file mode 100644
index 0000000..e08320d
--- /dev/null
+++ b/kubernetes/config/README
@@ -0,0 +1 @@
+Please replace with your own configuration here.
\ No newline at end of file
diff --git a/kubernetes/example/config/filebeat/filebeat.yml b/kubernetes/config/production/filebeat/filebeat.yml
similarity index 88%
copy from kubernetes/example/config/filebeat/filebeat.yml
copy to kubernetes/config/production/filebeat/filebeat.yml
index c78c7e6..6916da1 100644
--- a/kubernetes/example/config/filebeat/filebeat.yml
+++ b/kubernetes/config/production/filebeat/filebeat.yml
@@ -18,7 +18,7 @@ filebeat.prospectors:
 - type: log
   enabled: true
   paths:
-    - /var/log/kylin/kylin.log
+    - /home/apache_kylin/kylin/logs
   multiline.pattern: '^\d{4}-([1][0-2]|[0][0-9])-([3][0-1]|[1-2][0-9]|[0][1-9]|[1-9]) ([2][0-3]|[0-1][0-9]|[1-9]):[0-5][0-9]:([0-5][0-9]|[6][0])'
   multiline.negate: true
   multiline.match: after
@@ -30,7 +30,7 @@ filebeat.prospectors:
 - type: log
   enabled: true
   paths:
-    - /var/log/kylin/kylin.gc.*.current
+    - /home/apache_kylin/kylin/logs/kylin.gc.*.current
   multiline.pattern: '^\d{4}-(?:0?[1-9]|1[0-2])-(?:(?:0[1-9])|(?:[12][0-9])|(?:3[01])|[1-9])T(?:2[0123]|[01]?[0-9]):(?:[0-5][0-9]):(?:(?:[0-5]?[0-9]|60)(?:[:.,][0-9]+)?)(?:Z|[+-](?:2[0123]|[01]?[0-9])(?::?(?:[0-5][0-9])))'
   multiline.negate: true
   multiline.match: after
@@ -42,7 +42,7 @@ filebeat.prospectors:
 - type: log
   enabled: true
   paths:
-    - /var/log/kylin/kylin.out
+    - /home/apache_kylin/kylin/logs/kylin.out
   multiline.pattern: '^\b(?:Jan?|Feb?|Mar?|Apr?|May?|Jun?|Jul?|Aug?|Sep?|Oct?|Nov?|Dec?)\b (?:(?:0[1-9])|(?:[12][0-9])|(?:3[01])|[1-9]), \d{4} (?:2[0123]|[01]?[0-9]):(?:[0-5][0-9]):(?:(?:[0-5]?[0-9]|60)(?:[:.,][0-9]+)?) (?:AM|PM)'
   multiline.negate: true
   multiline.match: after
@@ -53,7 +53,7 @@ filebeat.prospectors:
 # access log #
 - type: log
   paths:
-    - /var/log/tomcat/localhost_access_log.txt
+    - /home/apache_kylin/kylin/tomcat/logs/localhost_access_log.txt
   fields:
     name: "access"
     pipeline: "access"
@@ -61,7 +61,7 @@ filebeat.prospectors:
 # catalina log #
 - type: log
   paths:
-    - /var/log/tomcat/catalina.*.log
+    - /home/apache_kylin/kylin/tomcat/logs/catalina.*.log
   multiline.pattern: '^\b(?:Jan?|Feb?|Mar?|Apr?|May?|Jun?|Jul?|Aug?|Sep?|Oct?|Nov?|Dec?)\b (?:(?:0[1-9])|(?:[12][0-9])|(?:3[01])|[1-9]), \d{4} (?:2[0123]|[01]?[0-9]):(?:[0-5][0-9]):(?:(?:[0-5]?[0-9]|60)(?:[:.,][0-9]+)?) (?:AM|PM)'
   multiline.negate: true
   multiline.match: after
@@ -72,7 +72,7 @@ filebeat.prospectors:
 # localhost log #
 - type: log
   paths:
-    - /var/log/tomcat/localhost.*.log
+    - /home/apache_kylin/kylin/tomcat/logs/localhost.*.log
   multiline.pattern: '^\b(?:Jan?|Feb?|Mar?|Apr?|May?|Jun?|Jul?|Aug?|Sep?|Oct?|Nov?|Dec?)\b (?:(?:0[1-9])|(?:[12][0-9])|(?:3[01])|[1-9]), \d{4} (?:2[0123]|[01]?[0-9]):(?:[0-5][0-9]):(?:(?:[0-5]?[0-9]|60)(?:[:.,][0-9]+)?) (?:AM|PM)'
   multiline.negate: true
   multiline.match: after
@@ -103,8 +103,6 @@ setup.kibana:
 #-------------------------- Elasticsearch output ------------------------------
 output.elasticsearch:
   # Array of hosts to connect to.
-  hosts: []
-  protocol: "https"
-  username: ""
-  password: ""
+  hosts: ["cdh-master:9200"]
+  protocol: "http"
   index: "kylin-%{[fields.name]}-%{+yyyy-MM-dd}"
\ No newline at end of file
diff --git a/kubernetes/config/production/hadoop/core-site.xml b/kubernetes/config/production/hadoop/core-site.xml
new file mode 100644
index 0000000..9108ad8
--- /dev/null
+++ b/kubernetes/config/production/hadoop/core-site.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<configuration>
+</configuration>
diff --git a/kubernetes/config/production/hadoop/hbase-site.xml b/kubernetes/config/production/hadoop/hbase-site.xml
new file mode 100644
index 0000000..9108ad8
--- /dev/null
+++ b/kubernetes/config/production/hadoop/hbase-site.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<configuration>
+</configuration>
diff --git a/kubernetes/config/production/hadoop/hdfs-site.xml b/kubernetes/config/production/hadoop/hdfs-site.xml
new file mode 100644
index 0000000..9108ad8
--- /dev/null
+++ b/kubernetes/config/production/hadoop/hdfs-site.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<configuration>
+</configuration>
diff --git a/kubernetes/config/production/hadoop/hive-site.xml b/kubernetes/config/production/hadoop/hive-site.xml
new file mode 100644
index 0000000..9108ad8
--- /dev/null
+++ b/kubernetes/config/production/hadoop/hive-site.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<configuration>
+</configuration>
diff --git a/kubernetes/config/production/hadoop/mapred-site.xml b/kubernetes/config/production/hadoop/mapred-site.xml
new file mode 100644
index 0000000..9108ad8
--- /dev/null
+++ b/kubernetes/config/production/hadoop/mapred-site.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<configuration>
+</configuration>
diff --git a/kubernetes/config/production/hadoop/yarn-site.xml b/kubernetes/config/production/hadoop/yarn-site.xml
new file mode 100644
index 0000000..9108ad8
--- /dev/null
+++ b/kubernetes/config/production/hadoop/yarn-site.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<configuration>
+</configuration>
diff --git a/kubernetes/config/production/kylin-all/kylin-kafka-consumer.xml b/kubernetes/config/production/kylin-all/kylin-kafka-consumer.xml
new file mode 100644
index 0000000..8529a41
--- /dev/null
+++ b/kubernetes/config/production/kylin-all/kylin-kafka-consumer.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<!--
+ for more kafka consumer configs, please refer to http://kafka.apache.org/documentation#consumerconfigs
+-->
+<configuration>
+    <property>
+        <name>session.timeout.ms</name>
+        <value>10000</value>
+    </property>
+    <property>
+        <name>request.timeout.ms</name>
+        <value>20000</value>
+    </property>
+</configuration>
\ No newline at end of file
diff --git a/k8s/developments/config/kylin/kylin-server-log4j.properties b/kubernetes/config/production/kylin-all/kylin-server-log4j.properties
similarity index 100%
copy from k8s/developments/config/kylin/kylin-server-log4j.properties
copy to kubernetes/config/production/kylin-all/kylin-server-log4j.properties
diff --git a/k8s/developments/config/kylin/kylin-tools-log4j.properties b/kubernetes/config/production/kylin-all/kylin-spark-log4j.properties
similarity index 51%
copy from k8s/developments/config/kylin/kylin-tools-log4j.properties
copy to kubernetes/config/production/kylin-all/kylin-spark-log4j.properties
index 54d18c2..948fb32 100644
--- a/k8s/developments/config/kylin/kylin-tools-log4j.properties
+++ b/kubernetes/config/production/kylin-all/kylin-spark-log4j.properties
@@ -15,24 +15,29 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-
-
-# the kylin-tools-log4j.properties is mainly for configuring log properties on kylin tools, including:
-#   1. tools launched by kylin.sh script, e.g. DeployCoprocessorCLI
-#   2. DebugTomcat
-#   3. others
-#
-# It's called kylin-tools-log4j.properties so that it won't distract users from the other more important log4j config file: kylin-server-log4j.properties
-# enable this by -Dlog4j.configuration=kylin-tools-log4j.properties
-
-log4j.rootLogger=INFO,stderr
+log4j.rootCategory=WARN,stderr,stdout
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.target=System.out
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}:%L : %m%n
 
 log4j.appender.stderr=org.apache.log4j.ConsoleAppender
 log4j.appender.stderr.Target=System.err
 log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
 log4j.appender.stderr.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}:%L : %m%n
 
-#log4j.logger.org.apache.hadoop=ERROR
-log4j.logger.org.apache.kylin=DEBUG
-log4j.logger.org.springframework=WARN
-log4j.logger.org.apache.kylin.tool.shaded=INFO
\ No newline at end of file
+
+# Settings to quiet third party logs that are too verbose
+log4j.logger.org.spark-project.jetty=WARN
+log4j.logger.org.spark-project.jetty.util.component.AbstractLifeCycle=ERROR
+log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO
+log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO
+log4j.logger.org.apache.parquet=ERROR
+log4j.logger.parquet=ERROR
+
+# SPARK-9183: Settings to avoid annoying messages when looking up nonexistent UDFs in SparkSQL with Hive support
+log4j.logger.org.apache.hadoop.hive.metastore.RetryingHMSHandler=FATAL
+log4j.logger.org.apache.hadoop.hive.ql.exec.FunctionRegistry=ERROR
+log4j.logger.org.apache.spark.sql=WARN
+
+log4j.logger.org.apache.kylin=DEBUG
\ No newline at end of file
diff --git a/k8s/developments/config/kylin/kylin-tools-log4j.properties b/kubernetes/config/production/kylin-all/kylin-tools-log4j.properties
similarity index 100%
copy from k8s/developments/config/kylin/kylin-tools-log4j.properties
copy to kubernetes/config/production/kylin-all/kylin-tools-log4j.properties
diff --git a/kubernetes/config/production/kylin-all/kylin.properties b/kubernetes/config/production/kylin-all/kylin.properties
new file mode 100644
index 0000000..c4d3cc1
--- /dev/null
+++ b/kubernetes/config/production/kylin-all/kylin.properties
@@ -0,0 +1,419 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+kylin.cache.memcached.hosts=10.1.2.42:11211
+kylin.query.cache-signature-enabled=true
+kylin.query.lazy-query-enabled=true
+kylin.metrics.memcached.enabled=true
+kylin.query.segment-cache-enabled=true
+
+kylin.metrics.monitor-enabled=true
+kylin.metrics.reporter-query-enabled=true
+kylin.metrics.reporter-job-enabled=true
+# The commented values below will take effect as default settings
+# Uncomment and override them if necessary
+
+
+
+#
+#### METADATA | ENV ###
+#
+## The metadata store in hbase
+kylin.metadata.url=kylin_metadata_k8s_prod@hbase
+#
+## metadata cache sync retry times
+#kylin.metadata.sync-retries=3
+#
+## Working folder in HDFS, better be qualified absolute path, make sure user has the right permission to this directory
+kylin.env.hdfs-working-dir=/kylin/apache_kylin
+#
+## DEV|QA|PROD. DEV will turn on some dev features, QA and PROD has no difference in terms of functions.
+#kylin.env=QA
+#
+## kylin zk base path
+#kylin.env.zookeeper-base-path=/kylin
+#
+#### SERVER | WEB | RESTCLIENT ###
+#
+## Kylin server mode, valid value [all, query, job]
+kylin.server.mode=all
+#
+## List of web servers in use, this enables one web server instance to sync up with other servers.
+#kylin.server.cluster-servers=localhost:7070
+#
+## Display timezone on UI,format like[GMT+N or GMT-N]
+#kylin.web.timezone=
+#
+## Timeout value for the queries submitted through the Web UI, in milliseconds
+#kylin.web.query-timeout=300000
+#
+#kylin.web.cross-domain-enabled=true
+#
+##allow user to export query result
+#kylin.web.export-allow-admin=true
+#kylin.web.export-allow-other=true
+#
+## Hide measures in measure list of cube designer, separate by comma
+#kylin.web.hide-measures=RAW
+#
+##max connections of one route
+#kylin.restclient.connection.default-max-per-route=20
+#
+##max connections of one rest-client
+#kylin.restclient.connection.max-total=200
+#
+#### PUBLIC CONFIG ###
+#kylin.engine.default=2
+#kylin.storage.default=2
+#kylin.web.hive-limit=20
+#kylin.web.help.length=4
+#kylin.web.help.0=start|Getting Started|http://kylin.apache.org/docs/tutorial/kylin_sample.html
+#kylin.web.help.1=odbc|ODBC Driver|http://kylin.apache.org/docs/tutorial/odbc.html
+#kylin.web.help.2=tableau|Tableau Guide|http://kylin.apache.org/docs/tutorial/tableau_91.html
+#kylin.web.help.3=onboard|Cube Design Tutorial|http://kylin.apache.org/docs/howto/howto_optimize_cubes.html
+#kylin.web.link-streaming-guide=http://kylin.apache.org/
+#kylin.htrace.show-gui-trace-toggle=false
+#kylin.web.link-hadoop=
+#kylin.web.link-diagnostic=
+#kylin.web.contact-mail=
+#kylin.server.external-acl-provider=
+#
+## Default time filter for job list, 0->current day, 1->last one day, 2->last one week, 3->last one year, 4->all
+#kylin.web.default-time-filter=1
+#
+#### SOURCE ###
+#
+## Hive client, valid value [cli, beeline]
+#kylin.source.hive.client=cli
+#
+## Absolute path to beeline shell, can be set to spark beeline instead of the default hive beeline on PATH
+#kylin.source.hive.beeline-shell=beeline
+#
+## Parameters for beeline client, only necessary if hive client is beeline
+##kylin.source.hive.beeline-params=-n root --hiveconf hive.security.authorization.sqlstd.confwhitelist.append='mapreduce.job.*|dfs.*' -u jdbc:hive2://localhost:10000
+#
+## While hive client uses above settings to read hive table metadata,
+## table operations can go through a separate SparkSQL command line, given SparkSQL connects to the same Hive metastore.
+#kylin.source.hive.enable-sparksql-for-table-ops=false
+##kylin.source.hive.sparksql-beeline-shell=/path/to/spark-client/bin/beeline
+##kylin.source.hive.sparksql-beeline-params=-n root --hiveconf hive.security.authorization.sqlstd.confwhitelist.append='mapreduce.job.*|dfs.*' -u jdbc:hive2://localhost:10000
+#
+#kylin.source.hive.keep-flat-table=false
+#
+## Hive database name for putting the intermediate flat tables
+#kylin.source.hive.database-for-flat-table=default
+#
+## Whether redistribute the intermediate flat table before building
+#kylin.source.hive.redistribute-flat-table=true
+#
+#
+#### STORAGE ###
+#
+## The storage for final cube file in hbase
+#kylin.storage.url=hbase
+#
+## The prefix of hbase table
+kylin.storage.hbase.table-name-prefix=K8S_
+#
+## The namespace for hbase storage
+kylin.storage.hbase.namespace=lacus
+#
+## Compression codec for htable, valid value [none, snappy, lzo, gzip, lz4]
+#kylin.storage.hbase.compression-codec=none
+#
+## HBase Cluster FileSystem, which serving hbase, format as hdfs://hbase-cluster:8020
+## Leave empty if hbase running on same cluster with hive and mapreduce
+##kylin.storage.hbase.cluster-fs=
+#
+## The cut size for hbase region, in GB.
+#kylin.storage.hbase.region-cut-gb=5
+#
+## The hfile size in GB; a smaller hfile makes the hfile-converting MR job use more reducers and run faster.
+## Set 0 to disable this optimization.
+#kylin.storage.hbase.hfile-size-gb=2
+#
+#kylin.storage.hbase.min-region-count=1
+#kylin.storage.hbase.max-region-count=500
+#
+## Optional information for the owner of kylin platform, it can be your team's email
+## Currently it will be attached to each kylin's htable attribute
+#kylin.storage.hbase.owner-tag=whoami@kylin.apache.org
+#
+#kylin.storage.hbase.coprocessor-mem-gb=3
+#
+## By default kylin can spill query's intermediate results to disks when it's consuming too much memory.
+## Set it to false if you want query to abort immediately in such condition.
+#kylin.storage.partition.aggr-spill-enabled=true
+#
+## The maximum number of bytes each coprocessor is allowed to scan.
+## To allow arbitrary large scan, you can set it to 0.
+#kylin.storage.partition.max-scan-bytes=3221225472
+#
+## The default coprocessor timeout is (hbase.rpc.timeout * 0.9) / 1000 seconds,
+## You can set it to a smaller value. 0 means use default.
+## kylin.storage.hbase.coprocessor-timeout-seconds=0
+#
+## clean real storage after delete operation
+## if you want to delete the real storage like htable of deleting segment, you can set it to true
+#kylin.storage.clean-after-delete-operation=false
+#
+#### JOB ###
+#
+## Max job retry on error, default 0: no retry
+#kylin.job.retry=0
+#
+## Max count of concurrent jobs running
+#kylin.job.max-concurrent-jobs=10
+#
+## The percentage of the sampling, default 100%
+#kylin.job.sampling-percentage=100
+#
+## If true, will send email notification on job complete
+##kylin.job.notification-enabled=true
+##kylin.job.notification-mail-enable-starttls=true
+##kylin.job.notification-mail-host=smtp.office365.com
+##kylin.job.notification-mail-port=587
+##kylin.job.notification-mail-username=kylin@example.com
+##kylin.job.notification-mail-password=mypassword
+##kylin.job.notification-mail-sender=kylin@example.com
+kylin.job.scheduler.provider.100=org.apache.kylin.job.impl.curator.CuratorScheduler
+kylin.job.scheduler.default=100
+#
+#### ENGINE ###
+#
+## Time interval to check hadoop job status
+#kylin.engine.mr.yarn-check-interval-seconds=10
+#
+#kylin.engine.mr.reduce-input-mb=500
+#
+#kylin.engine.mr.max-reducer-number=500
+#
+#kylin.engine.mr.mapper-input-rows=1000000
+#
+## Enable dictionary building in MR reducer
+#kylin.engine.mr.build-dict-in-reducer=true
+#
+## Number of reducers for fetching UHC column distinct values
+#kylin.engine.mr.uhc-reducer-count=3
+#
+## Whether using an additional step to build UHC dictionary
+#kylin.engine.mr.build-uhc-dict-in-additional-step=false
+#
+#
+#### CUBE | DICTIONARY ###
+#
+#kylin.cube.cuboid-scheduler=org.apache.kylin.cube.cuboid.DefaultCuboidScheduler
+#kylin.cube.segment-advisor=org.apache.kylin.cube.CubeSegmentAdvisor
+#
+## 'auto', 'inmem', 'layer' or 'random' for testing 
+#kylin.cube.algorithm=layer
+#
+## A smaller threshold prefers layer, a larger threshold prefers in-mem
+#kylin.cube.algorithm.layer-or-inmem-threshold=7
+#
+## auto use inmem algorithm:
+## 1, cube planner optimize job
+## 2, no source record
+#kylin.cube.algorithm.inmem-auto-optimize=true
+#
+#kylin.cube.aggrgroup.max-combination=32768
+#
+#kylin.snapshot.max-mb=300
+#
+#kylin.cube.cubeplanner.enabled=true
+#kylin.cube.cubeplanner.enabled-for-existing-cube=true
+#kylin.cube.cubeplanner.expansion-threshold=15.0
+#kylin.cube.cubeplanner.recommend-cache-max-size=200
+#kylin.cube.cubeplanner.mandatory-rollup-threshold=1000
+#kylin.cube.cubeplanner.algorithm-threshold-greedy=8
+#kylin.cube.cubeplanner.algorithm-threshold-genetic=23
+#
+#
+#### QUERY ###
+#
+## Controls the maximum number of bytes a query is allowed to scan storage.
+## The default value 0 means no limit.
+## The counterpart kylin.storage.partition.max-scan-bytes sets the maximum per coprocessor.
+#kylin.query.max-scan-bytes=0
+#
+kylin.query.cache-enabled=true
+#
+## Controls extras properties for Calcite jdbc driver
+## all extras properties should be under the prefix "kylin.query.calcite.extras-props."
+## case sensitive, default: true, to enable case insensitive set it to false
+## @see org.apache.calcite.config.CalciteConnectionProperty.CASE_SENSITIVE
+#kylin.query.calcite.extras-props.caseSensitive=true
+## how to handle unquoted identifiers, default: TO_UPPER, available options: UNCHANGED, TO_UPPER, TO_LOWER
+## @see org.apache.calcite.config.CalciteConnectionProperty.UNQUOTED_CASING
+#kylin.query.calcite.extras-props.unquotedCasing=TO_UPPER
+## quoting method, default: DOUBLE_QUOTE, available options: DOUBLE_QUOTE, BACK_TICK, BRACKET
+## @see org.apache.calcite.config.CalciteConnectionProperty.QUOTING
+#kylin.query.calcite.extras-props.quoting=DOUBLE_QUOTE
+## change SqlConformance from DEFAULT to LENIENT to enable group by ordinal
+## @see org.apache.calcite.sql.validate.SqlConformance.SqlConformanceEnum
+#kylin.query.calcite.extras-props.conformance=LENIENT
+#
+## TABLE ACL
+#kylin.query.security.table-acl-enabled=true
+#
+## Usually should not modify this
+#kylin.query.interceptors=org.apache.kylin.rest.security.TableInterceptor
+#
+#kylin.query.escape-default-keyword=false
+#
+## Usually should not modify this
+#kylin.query.transformers=org.apache.kylin.query.util.DefaultQueryTransformer,org.apache.kylin.query.util.KeywordDefaultDirtyHack
+#
+#### SECURITY ###
+#
+## Spring security profile, options: testing, ldap, saml
+## with "testing" profile, user can use pre-defined name/pwd like KYLIN/ADMIN to login
+#kylin.security.profile=testing
+#
+## Admin roles in LDAP, for ldap and saml
+#kylin.security.acl.admin-role=admin
+#
+## LDAP authentication configuration
+#kylin.security.ldap.connection-server=ldap://ldap_server:389
+#kylin.security.ldap.connection-username=
+#kylin.security.ldap.connection-password=
+#
+## LDAP user account directory;
+#kylin.security.ldap.user-search-base=
+#kylin.security.ldap.user-search-pattern=
+#kylin.security.ldap.user-group-search-base=
+#kylin.security.ldap.user-group-search-filter=(|(member={0})(memberUid={1}))
+#
+## LDAP service account directory
+#kylin.security.ldap.service-search-base=
+#kylin.security.ldap.service-search-pattern=
+#kylin.security.ldap.service-group-search-base=
+#
+### SAML configurations for SSO
+## SAML IDP metadata file location
+#kylin.security.saml.metadata-file=classpath:sso_metadata.xml
+#kylin.security.saml.metadata-entity-base-url=https://hostname/kylin
+#kylin.security.saml.keystore-file=classpath:samlKeystore.jks
+#kylin.security.saml.context-scheme=https
+#kylin.security.saml.context-server-name=hostname
+#kylin.security.saml.context-server-port=443
+#kylin.security.saml.context-path=/kylin
+#
+#### SPARK ENGINE CONFIGS ###
+#
+## Hadoop conf folder, will export this as "HADOOP_CONF_DIR" to run spark-submit
+## This must contain site xmls of core, yarn, hive, and hbase in one folder
+##kylin.env.hadoop-conf-dir=/etc/hadoop/conf
+#
+## Estimate the RDD partition numbers
+#kylin.engine.spark.rdd-partition-cut-mb=10
+#
+## Minimal partition numbers of rdd
+#kylin.engine.spark.min-partition=1
+#
+## Max partition numbers of rdd
+#kylin.engine.spark.max-partition=5000
+#
+## Spark conf (default is in spark/conf/spark-defaults.conf)
+#kylin.engine.spark-conf.spark.master=yarn
+##kylin.engine.spark-conf.spark.submit.deployMode=cluster
+#kylin.engine.spark-conf.spark.yarn.queue=default
+#kylin.engine.spark-conf.spark.driver.memory=2G
+#kylin.engine.spark-conf.spark.executor.memory=4G
+#kylin.engine.spark-conf.spark.executor.instances=40
+#kylin.engine.spark-conf.spark.yarn.executor.memoryOverhead=1024
+#kylin.engine.spark-conf.spark.shuffle.service.enabled=true
+#kylin.engine.spark-conf.spark.eventLog.enabled=true
+#kylin.engine.spark-conf.spark.eventLog.dir=hdfs\:///kylin/spark-history
+#kylin.engine.spark-conf.spark.history.fs.logDirectory=hdfs\:///kylin/spark-history
+#kylin.engine.spark-conf.spark.hadoop.yarn.timeline-service.enabled=false
+#
+#### Spark conf for specific job
+#kylin.engine.spark-conf-mergedict.spark.executor.memory=6G
+#kylin.engine.spark-conf-mergedict.spark.memory.fraction=0.2
+#
+## manually upload spark-assembly jar to HDFS and then set this property will avoid repeatedly uploading jar at runtime
+##kylin.engine.spark-conf.spark.yarn.archive=hdfs://namenode:8020/kylin/spark/spark-libs.jar
+##kylin.engine.spark-conf.spark.io.compression.codec=org.apache.spark.io.SnappyCompressionCodec
+#
+## uncomment for HDP
+##kylin.engine.spark-conf.spark.driver.extraJavaOptions=-Dhdp.version=current
+##kylin.engine.spark-conf.spark.yarn.am.extraJavaOptions=-Dhdp.version=current
+##kylin.engine.spark-conf.spark.executor.extraJavaOptions=-Dhdp.version=current
+#
+#
+#### QUERY PUSH DOWN ###
+#
+##kylin.query.pushdown.runner-class-name=org.apache.kylin.query.adhoc.PushDownRunnerJdbcImpl
+#
+##kylin.query.pushdown.update-enabled=false
+##kylin.query.pushdown.jdbc.url=jdbc:hive2://sandbox:10000/default
+##kylin.query.pushdown.jdbc.driver=org.apache.hive.jdbc.HiveDriver
+##kylin.query.pushdown.jdbc.username=hive
+##kylin.query.pushdown.jdbc.password=
+#
+##kylin.query.pushdown.jdbc.pool-max-total=8
+##kylin.query.pushdown.jdbc.pool-max-idle=8
+##kylin.query.pushdown.jdbc.pool-min-idle=0
+#
+#### JDBC Data Source
+##kylin.source.jdbc.connection-url=
+##kylin.source.jdbc.driver=
+##kylin.source.jdbc.dialect=
+##kylin.source.jdbc.user=
+##kylin.source.jdbc.pass=
+##kylin.source.jdbc.sqoop-home=
+##kylin.source.jdbc.filed-delimiter=|
+#
+#### Livy with Kylin
+##kylin.engine.livy-conf.livy-enabled=false
+##kylin.engine.livy-conf.livy-url=http://LivyHost:8998
+##kylin.engine.livy-conf.livy-key.file=hdfs:///path-to-kylin-job-jar
+##kylin.engine.livy-conf.livy-arr.jars=hdfs:///path-to-hadoop-dependency-jar
+#
+#
+#### Realtime OLAP ###
+#
+## Where should local segment cache located, for absolute path, the real path will be ${KYLIN_HOME}/${kylin.stream.index.path}
+#kylin.stream.index.path=stream_index
+#
+## The timezone for Derived Time Column like hour_start, try set to GMT+N, please check detail at KYLIN-4010
+#kylin.stream.event.timezone=
+#
+## Debug switch for print realtime global dict encode information, please check detail at KYLIN-4141
+#kylin.stream.print-realtime-dict-enabled=false
+#
+## Should enable latest coordinator, please check detail at KYLIN-4167
+#kylin.stream.new.coordinator-enabled=true
+#
+## In which way should we collect receiver's metrics info
+##kylin.stream.metrics.option=console/csv/jmx
+#
+## When enabling a streaming cube, whether to consume from the earliest offset or the latest offset
+#kylin.stream.consume.offsets.latest=true
+#
+## The parallelism of scan in receiver side
+#kylin.stream.receiver.use-threads-per-query=8
+#
+## How coordinator/receiver register itself into StreamMetadata, there are three option:
+## 1. hostname:port, then kylin will set the config ip and port as the currentNode;
+## 2. port, then kylin will get the node's hostname and append port as the currentNode;
+## 3. not set, then kylin will get the node hostname address and set the hostname and defaultPort(7070 for coordinator or 9090 for receiver) as the currentNode.
+##kylin.stream.node=
+#
+## Auto resubmit after job be discarded
+#kylin.stream.auto-resubmit-after-discard-enabled=true
diff --git a/kubernetes/config/production/kylin-all/kylin_hive_conf.xml b/kubernetes/config/production/kylin-all/kylin_hive_conf.xml
new file mode 100644
index 0000000..f01d08e
--- /dev/null
+++ b/kubernetes/config/production/kylin-all/kylin_hive_conf.xml
@@ -0,0 +1,102 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+    <property>
+        <name>dfs.replication</name>
+        <value>2</value>
+        <description>Block replication</description>
+    </property>
+
+    <property>
+        <name>hive.exec.compress.output</name>
+        <value>true</value>
+        <description>Enable compress</description>
+    </property>
+
+    <property>
+        <name>hive.auto.convert.join</name>
+        <value>true</value>
+        <description>Enables the optimization about converting common join into mapjoin</description>
+    </property>
+
+    <property>
+        <name>hive.auto.convert.join.noconditionaltask</name>
+        <value>true</value>
+        <description>enable map-side join</description>
+    </property>
+
+    <property>
+        <name>hive.auto.convert.join.noconditionaltask.size</name>
+        <value>100000000</value>
+        <description>enable map-side join</description>
+    </property>
+
+    <!--
+    The default map outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.map.output.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description></description>
+    </property>
+    -->
+    <!--
+    The default job outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description></description>
+    </property>
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.type</name>
+        <value>BLOCK</value>
+        <description>The compression type to use for job outputs</description>
+    </property>
+
+    -->
+    <property>
+        <name>mapreduce.job.split.metainfo.maxsize</name>
+        <value>-1</value>
+        <description>The maximum permissible size of the split metainfo file.
+            The JobTracker won't attempt to read split metainfo files bigger than
+            the configured value. No limits if set to -1.
+        </description>
+    </property>
+
+    <property>
+        <name>hive.stats.autogather</name>
+        <value>true</value>
+        <description>Collect statistics for newly created intermediate table</description>
+    </property>
+
+    <property>
+        <name>hive.merge.mapfiles</name>
+        <value>false</value>
+        <description>Disable Hive's auto merge</description>
+    </property>
+
+    <property>
+        <name>hive.merge.mapredfiles</name>
+        <value>false</value>
+        <description>Disable Hive's auto merge</description>
+    </property>
+</configuration>
diff --git a/kubernetes/config/production/kylin-all/kylin_job_conf.xml b/kubernetes/config/production/kylin-all/kylin_job_conf.xml
new file mode 100644
index 0000000..17a9145
--- /dev/null
+++ b/kubernetes/config/production/kylin-all/kylin_job_conf.xml
@@ -0,0 +1,88 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+
+    <property>
+        <name>mapreduce.job.split.metainfo.maxsize</name>
+        <value>-1</value>
+        <description>The maximum permissible size of the split metainfo file.
+            The JobTracker won't attempt to read split metainfo files bigger than
+            the configured value. No limits if set to -1.
+        </description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.output.compress</name>
+        <value>true</value>
+        <description>Compress map outputs</description>
+    </property>
+
+    <!--
+    The default map outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.map.output.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for map outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress</name>
+        <value>true</value>
+        <description>Compress the output of a MapReduce job</description>
+    </property>
+    <!--
+    The default job outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for job outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.type</name>
+        <value>BLOCK</value>
+        <description>The compression type to use for job outputs</description>
+    </property>
+
+
+    <property>
+        <name>mapreduce.job.max.split.locations</name>
+        <value>2000</value>
+        <description>No description</description>
+    </property>
+
+    <property>
+        <name>dfs.replication</name>
+        <value>2</value>
+        <description>Block replication</description>
+    </property>
+
+    <property>
+        <name>mapreduce.task.timeout</name>
+        <value>3600000</value>
+        <description>Set task timeout to 1 hour</description>
+    </property>
+
+</configuration>
diff --git a/kubernetes/config/production/kylin-all/kylin_job_conf_cube_merge.xml b/kubernetes/config/production/kylin-all/kylin_job_conf_cube_merge.xml
new file mode 100644
index 0000000..79365ad
--- /dev/null
+++ b/kubernetes/config/production/kylin-all/kylin_job_conf_cube_merge.xml
@@ -0,0 +1,104 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+    <property>
+        <name>mapreduce.job.split.metainfo.maxsize</name>
+        <value>-1</value>
+        <description>The maximum permissible size of the split metainfo file.
+            The JobTracker won't attempt to read split metainfo files bigger than
+            the configured value. No limits if set to -1.
+        </description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.output.compress</name>
+        <value>true</value>
+        <description>Compress map outputs</description>
+    </property>
+
+    <!--
+    The default map outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.map.output.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for map outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress</name>
+        <value>true</value>
+        <description>Compress the output of a MapReduce job</description>
+    </property>
+    <!--
+    The default job outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for job outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.type</name>
+        <value>BLOCK</value>
+        <description>The compression type to use for job outputs</description>
+    </property>
+
+    <property>
+        <name>mapreduce.job.max.split.locations</name>
+        <value>2000</value>
+        <description>No description</description>
+    </property>
+
+    <property>
+        <name>dfs.replication</name>
+        <value>2</value>
+        <description>Block replication</description>
+    </property>
+
+    <property>
+        <name>mapreduce.task.timeout</name>
+        <value>7200000</value>
+        <description>Set task timeout to 2 hours</description>
+    </property>
+
+    <!--Additional config for cube merge job, giving more memory -->
+    <property>
+        <name>mapreduce.map.memory.mb</name>
+        <value>3072</value>
+        <description></description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.java.opts</name>
+        <value>-Xmx2700m -XX:OnOutOfMemoryError='kill -9 %p'</value>
+        <description></description>
+    </property>
+
+    <property>
+        <name>mapreduce.task.io.sort.mb</name>
+        <value>200</value>
+        <description></description>
+    </property>
+</configuration>
diff --git a/kubernetes/config/production/kylin-all/kylin_job_conf_inmem.xml b/kubernetes/config/production/kylin-all/kylin_job_conf_inmem.xml
new file mode 100644
index 0000000..ddda4dd
--- /dev/null
+++ b/kubernetes/config/production/kylin-all/kylin_job_conf_inmem.xml
@@ -0,0 +1,111 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+    <property>
+        <name>mapreduce.job.is-mem-hungry</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>mapreduce.job.split.metainfo.maxsize</name>
+        <value>-1</value>
+        <description>The maximum permissible size of the split metainfo file.
+            The JobTracker won't attempt to read split metainfo files bigger than
+            the configured value. No limits if set to -1.
+        </description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.output.compress</name>
+        <value>true</value>
+        <description>Compress map outputs</description>
+    </property>
+
+    <!--
+    The default map outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.map.output.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for map outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress</name>
+        <value>true</value>
+        <description>Compress the output of a MapReduce job</description>
+    </property>
+    <!--
+    The default job outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for job outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.type</name>
+        <value>BLOCK</value>
+        <description>The compression type to use for job outputs</description>
+    </property>
+
+
+    <property>
+        <name>mapreduce.job.max.split.locations</name>
+        <value>2000</value>
+        <description>No description</description>
+    </property>
+
+    <property>
+        <name>dfs.replication</name>
+        <value>2</value>
+        <description>Block replication</description>
+    </property>
+
+    <property>
+        <name>mapreduce.task.timeout</name>
+        <value>7200000</value>
+        <description>Set task timeout to 2 hours</description>
+    </property>
+
+    <!--Additional config for in-mem cubing, giving mapper more memory -->
+    <property>
+        <name>mapreduce.map.memory.mb</name>
+        <value>3072</value>
+        <description></description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.java.opts</name>
+        <value>-Xmx2700m -XX:OnOutOfMemoryError='kill -9 %p'</value>
+        <description></description>
+    </property>
+
+    <property>
+        <name>mapreduce.task.io.sort.mb</name>
+        <value>200</value>
+        <description></description>
+    </property>
+
+</configuration>
diff --git a/kubernetes/config/production/kylin-all/setenv-tool.sh b/kubernetes/config/production/kylin-all/setenv-tool.sh
new file mode 100644
index 0000000..487b5ef
--- /dev/null
+++ b/kubernetes/config/production/kylin-all/setenv-tool.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# source me
+
+# (if you're deploying KYLIN on a powerful server and want to replace the default conservative settings)
+# uncomment the following for it to take effect
+export KYLIN_JVM_SETTINGS="-Xms1024M -Xmx4096M -Xss1024K -XX:MaxPermSize=512M -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.$$ -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=64M"
+
+# Newer versions of glibc use an arena memory allocator that causes virtual
+# memory usage to explode. Tune the variable down to prevent vmem explosion.
+# See HADOOP-7154.
+export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
+
+# export KYLIN_JVM_SETTINGS="-Xms16g -Xmx16g -XX:MaxPermSize=512m -XX:NewSize=3g -XX:MaxNewSize=3g -XX:SurvivorRatio=4 -XX:+CMSClassUnloadingEnabled -XX:+CMSParallelRemarkEnabled -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:+DisableExplicitGC -XX:+HeapDumpOnOutOfMemoryError -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.$$ -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFi [...]
+
+# uncomment the following for it to take effect (the values need adjusting to fit your env)
+# export KYLIN_DEBUG_SETTINGS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
+
+# when running on HDP, try to determine the software stack version and set the hdp.version JVM property
+if [[ -d "/usr/hdp/current/hadoop-client" ]]
+then
+   export KYLIN_EXTRA_START_OPTS="-Dhdp.version=`ls -l /usr/hdp/current/hadoop-client | awk -F'/' '{print $8}'`"
+   # attempt to locate JVM native libraries and set corresponding property
+   if [[ -d "/usr/hdp/current/hadoop-client/lib/native" ]]
+   then
+      export KYLIN_LD_LIBRARY_SETTINGS="-Djava.library.path=/usr/hdp/current/hadoop-client/lib/native"
+   fi
+else
+   export KYLIN_EXTRA_START_OPTS=""
+   # uncomment the following line to set JVM native library path, the values need to reflect your environment and hardware architecture
+   # export KYLIN_LD_LIBRARY_SETTINGS="-Djava.library.path=/apache/hadoop/lib/native/Linux-amd64-64"
+fi
+
+if [ ! -z "${KYLIN_JVM_SETTINGS}" ]
+then
+    verbose "KYLIN_JVM_SETTINGS is ${KYLIN_JVM_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_JVM_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_JVM_SETTINGS is not set, using default jvm settings: ${KYLIN_JVM_SETTINGS}"
+fi
+
+if [ ! -z "${KYLIN_DEBUG_SETTINGS}" ]
+then
+    verbose "KYLIN_DEBUG_SETTINGS is ${KYLIN_DEBUG_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_DEBUG_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_DEBUG_SETTINGS is not set, will not enable remote debuging"
+fi
+
+if [ ! -z "${KYLIN_LD_LIBRARY_SETTINGS}" ]
+then
+    verbose "KYLIN_LD_LIBRARY_SETTINGS is ${KYLIN_LD_LIBRARY_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_LD_LIBRARY_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_LD_LIBRARY_SETTINGS is not set, it is okay unless you want to specify your own native path"
+fi
diff --git a/kubernetes/config/production/kylin-all/setenv.sh b/kubernetes/config/production/kylin-all/setenv.sh
new file mode 100644
index 0000000..fa88769
--- /dev/null
+++ b/kubernetes/config/production/kylin-all/setenv.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# source me
+
+# (if you're deploying KYLIN on a powerful server and want to replace the default conservative settings)
+# uncomment the following for it to take effect
+export KYLIN_JVM_SETTINGS="-Xms1024M -Xmx2048M -Xss1024K -XX:MaxPermSize=512M -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.%p -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=64M"
+
+# Newer versions of glibc use an arena memory allocator that causes virtual
+# memory usage to explode. Tune the variable down to prevent vmem explosion.
+# See HADOOP-7154.
+export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
+
+# export KYLIN_JVM_SETTINGS="-Xms16g -Xmx16g -XX:MaxPermSize=512m -XX:NewSize=3g -XX:MaxNewSize=3g -XX:SurvivorRatio=4 -XX:+CMSClassUnloadingEnabled -XX:+CMSParallelRemarkEnabled -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:+DisableExplicitGC -XX:+HeapDumpOnOutOfMemoryError -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.%p -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFi [...]
+
+# uncomment the following for it to take effect (the values need adjusting to fit your env)
+# export KYLIN_DEBUG_SETTINGS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
+
+# when running on HDP, try to determine the software stack version and set the hdp.version JVM property
+if [[ -d "/usr/hdp/current/hadoop-client" ]]
+then
+   export KYLIN_EXTRA_START_OPTS="-Dhdp.version=`ls -l /usr/hdp/current/hadoop-client | awk -F'/' '{print $8}'`"
+   # attempt to locate JVM native libraries and set corresponding property
+   if [[ -d "/usr/hdp/current/hadoop-client/lib/native" ]]
+   then
+      export KYLIN_LD_LIBRARY_SETTINGS="-Djava.library.path=/usr/hdp/current/hadoop-client/lib/native"
+   fi
+else
+   export KYLIN_EXTRA_START_OPTS=""
+   # uncomment the following line to set JVM native library path, the values need to reflect your environment and hardware architecture
+   # export KYLIN_LD_LIBRARY_SETTINGS="-Djava.library.path=/apache/hadoop/lib/native/Linux-amd64-64"
+fi
+
+if [ ! -z "${KYLIN_JVM_SETTINGS}" ]
+then
+    verbose "KYLIN_JVM_SETTINGS is ${KYLIN_JVM_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_JVM_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_JVM_SETTINGS is not set, using default jvm settings: ${KYLIN_JVM_SETTINGS}"
+fi
+
+if [ ! -z "${KYLIN_DEBUG_SETTINGS}" ]
+then
+    verbose "KYLIN_DEBUG_SETTINGS is ${KYLIN_DEBUG_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_DEBUG_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_DEBUG_SETTINGS is not set, will not enable remote debuging"
+fi
+
+if [ ! -z "${KYLIN_LD_LIBRARY_SETTINGS}" ]
+then
+    verbose "KYLIN_LD_LIBRARY_SETTINGS is ${KYLIN_LD_LIBRARY_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_LD_LIBRARY_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_LD_LIBRARY_SETTINGS is not set, it is okay unless you want to specify your own native path"
+fi
diff --git a/kubernetes/config/production/kylin-job/kylin-kafka-consumer.xml b/kubernetes/config/production/kylin-job/kylin-kafka-consumer.xml
new file mode 100644
index 0000000..8529a41
--- /dev/null
+++ b/kubernetes/config/production/kylin-job/kylin-kafka-consumer.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<!--
+ for more kafka consumer configs, please refer to http://kafka.apache.org/documentation#consumerconfigs
+-->
+<configuration>
+    <property>
+        <name>session.timeout.ms</name>
+        <value>10000</value>
+    </property>
+    <property>
+        <name>request.timeout.ms</name>
+        <value>20000</value>
+    </property>
+</configuration>
\ No newline at end of file
diff --git a/k8s/developments/config/kylin/kylin-server-log4j.properties b/kubernetes/config/production/kylin-job/kylin-server-log4j.properties
similarity index 100%
copy from k8s/developments/config/kylin/kylin-server-log4j.properties
copy to kubernetes/config/production/kylin-job/kylin-server-log4j.properties
diff --git a/k8s/developments/config/kylin/kylin-tools-log4j.properties b/kubernetes/config/production/kylin-job/kylin-spark-log4j.properties
similarity index 51%
copy from k8s/developments/config/kylin/kylin-tools-log4j.properties
copy to kubernetes/config/production/kylin-job/kylin-spark-log4j.properties
index 54d18c2..948fb32 100644
--- a/k8s/developments/config/kylin/kylin-tools-log4j.properties
+++ b/kubernetes/config/production/kylin-job/kylin-spark-log4j.properties
@@ -15,24 +15,29 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-
-
-# the kylin-tools-log4j.properties is mainly for configuring log properties on kylin tools, including:
-#   1. tools launched by kylin.sh script, e.g. DeployCoprocessorCLI
-#   2. DebugTomcat
-#   3. others
-#
-# It's called kylin-tools-log4j.properties so that it won't distract users from the other more important log4j config file: kylin-server-log4j.properties
-# enable this by -Dlog4j.configuration=kylin-tools-log4j.properties
-
-log4j.rootLogger=INFO,stderr
+log4j.rootCategory=WARN,stderr,stdout
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.target=System.out
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}:%L : %m%n
 
 log4j.appender.stderr=org.apache.log4j.ConsoleAppender
 log4j.appender.stderr.Target=System.err
 log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
 log4j.appender.stderr.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}:%L : %m%n
 
-#log4j.logger.org.apache.hadoop=ERROR
-log4j.logger.org.apache.kylin=DEBUG
-log4j.logger.org.springframework=WARN
-log4j.logger.org.apache.kylin.tool.shaded=INFO
\ No newline at end of file
+
+# Settings to quiet third party logs that are too verbose
+log4j.logger.org.spark-project.jetty=WARN
+log4j.logger.org.spark-project.jetty.util.component.AbstractLifeCycle=ERROR
+log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO
+log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO
+log4j.logger.org.apache.parquet=ERROR
+log4j.logger.parquet=ERROR
+
+# SPARK-9183: Settings to avoid annoying messages when looking up nonexistent UDFs in SparkSQL with Hive support
+log4j.logger.org.apache.hadoop.hive.metastore.RetryingHMSHandler=FATAL
+log4j.logger.org.apache.hadoop.hive.ql.exec.FunctionRegistry=ERROR
+log4j.logger.org.apache.spark.sql=WARN
+
+log4j.logger.org.apache.kylin=DEBUG
\ No newline at end of file
diff --git a/k8s/developments/config/kylin/kylin-tools-log4j.properties b/kubernetes/config/production/kylin-job/kylin-tools-log4j.properties
similarity index 100%
copy from k8s/developments/config/kylin/kylin-tools-log4j.properties
copy to kubernetes/config/production/kylin-job/kylin-tools-log4j.properties
diff --git a/kubernetes/config/production/kylin-job/kylin.properties b/kubernetes/config/production/kylin-job/kylin.properties
new file mode 100644
index 0000000..43e67e2
--- /dev/null
+++ b/kubernetes/config/production/kylin-job/kylin.properties
@@ -0,0 +1,422 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+kylin.cache.memcached.hosts=10.1.2.42:11211
+kylin.query.cache-signature-enabled=true
+kylin.query.lazy-query-enabled=true
+kylin.metrics.memcached.enabled=true
+kylin.query.segment-cache-enabled=true
+
+
+kylin.metrics.monitor-enabled=true
+kylin.metrics.reporter-query-enabled=true
+kylin.metrics.reporter-job-enabled=true
+
+# The commented values below will take effect as default settings
+# Uncomment and override them if necessary
+
+
+
+#
+#### METADATA | ENV ###
+#
+## The metadata store in hbase
+kylin.metadata.url=kylin_metadata_k8s_prod@hbase
+#
+## metadata cache sync retry times
+#kylin.metadata.sync-retries=3
+#
+## Working folder in HDFS, better be qualified absolute path, make sure user has the right permission to this directory
+#kylin.env.hdfs-working-dir=/kylin
+#
+## DEV|QA|PROD. DEV will turn on some dev features, QA and PROD has no difference in terms of functions.
+#kylin.env=QA
+#
+## kylin zk base path
+#kylin.env.zookeeper-base-path=/kylin
+#
+#### SERVER | WEB | RESTCLIENT ###
+#
+## Kylin server mode, valid value [all, query, job]
+kylin.server.mode=job
+#
+## List of web servers in use, this enables one web server instance to sync up with other servers.
+#kylin.server.cluster-servers=localhost:7070
+#
+## Display timezone on UI,format like[GMT+N or GMT-N]
+#kylin.web.timezone=
+#
+## Timeout value for the queries submitted through the Web UI, in milliseconds
+#kylin.web.query-timeout=300000
+#
+#kylin.web.cross-domain-enabled=true
+#
+##allow user to export query result
+#kylin.web.export-allow-admin=true
+#kylin.web.export-allow-other=true
+#
+## Hide measures in measure list of cube designer, separate by comma
+#kylin.web.hide-measures=RAW
+#
+##max connections of one route
+#kylin.restclient.connection.default-max-per-route=20
+#
+##max connections of one rest-client
+#kylin.restclient.connection.max-total=200
+#
+#### PUBLIC CONFIG ###
+#kylin.engine.default=2
+#kylin.storage.default=2
+#kylin.web.hive-limit=20
+#kylin.web.help.length=4
+#kylin.web.help.0=start|Getting Started|http://kylin.apache.org/docs/tutorial/kylin_sample.html
+#kylin.web.help.1=odbc|ODBC Driver|http://kylin.apache.org/docs/tutorial/odbc.html
+#kylin.web.help.2=tableau|Tableau Guide|http://kylin.apache.org/docs/tutorial/tableau_91.html
+#kylin.web.help.3=onboard|Cube Design Tutorial|http://kylin.apache.org/docs/howto/howto_optimize_cubes.html
+#kylin.web.link-streaming-guide=http://kylin.apache.org/
+#kylin.htrace.show-gui-trace-toggle=false
+#kylin.web.link-hadoop=
+#kylin.web.link-diagnostic=
+#kylin.web.contact-mail=
+#kylin.server.external-acl-provider=
+#
+## Default time filter for job list, 0->current day, 1->last one day, 2->last one week, 3->last one year, 4->all
+#kylin.web.default-time-filter=1
+#
+#### SOURCE ###
+#
+## Hive client, valid value [cli, beeline]
+#kylin.source.hive.client=cli
+#
+## Absolute path to beeline shell, can be set to spark beeline instead of the default hive beeline on PATH
+#kylin.source.hive.beeline-shell=beeline
+#
+## Parameters for beeline client, only necessary if hive client is beeline
+##kylin.source.hive.beeline-params=-n root --hiveconf hive.security.authorization.sqlstd.confwhitelist.append='mapreduce.job.*|dfs.*' -u jdbc:hive2://localhost:10000
+#
+## While hive client uses above settings to read hive table metadata,
+## table operations can go through a separate SparkSQL command line, given SparkSQL connects to the same Hive metastore.
+#kylin.source.hive.enable-sparksql-for-table-ops=false
+##kylin.source.hive.sparksql-beeline-shell=/path/to/spark-client/bin/beeline
+##kylin.source.hive.sparksql-beeline-params=-n root --hiveconf hive.security.authorization.sqlstd.confwhitelist.append='mapreduce.job.*|dfs.*' -u jdbc:hive2://localhost:10000
+#
+#kylin.source.hive.keep-flat-table=false
+#
+## Hive database name for putting the intermediate flat tables
+#kylin.source.hive.database-for-flat-table=default
+#
+## Whether redistribute the intermediate flat table before building
+#kylin.source.hive.redistribute-flat-table=true
+#
+#
+#### STORAGE ###
+#
+## The storage for final cube file in hbase
+#kylin.storage.url=hbase
+#
+## The prefix of hbase table
+kylin.storage.hbase.table-name-prefix=K8S_
+#
+## The namespace for hbase storage
+kylin.storage.hbase.namespace=lacus
+#
+## Compression codec for htable, valid value [none, snappy, lzo, gzip, lz4]
+#kylin.storage.hbase.compression-codec=none
+#
+## HBase Cluster FileSystem, which serving hbase, format as hdfs://hbase-cluster:8020
+## Leave empty if hbase running on same cluster with hive and mapreduce
+##kylin.storage.hbase.cluster-fs=
+#
+## The cut size for hbase region, in GB.
+#kylin.storage.hbase.region-cut-gb=5
+#
+## The hfile size in GB; a smaller hfile size gives the hfile-converting MR job more reducers and makes it faster.
+## Set 0 to disable this optimization.
+#kylin.storage.hbase.hfile-size-gb=2
+#
+#kylin.storage.hbase.min-region-count=1
+#kylin.storage.hbase.max-region-count=500
+#
+## Optional information for the owner of kylin platform, it can be your team's email
+## Currently it will be attached to each kylin's htable attribute
+#kylin.storage.hbase.owner-tag=whoami@kylin.apache.org
+#
+#kylin.storage.hbase.coprocessor-mem-gb=3
+#
+## By default kylin can spill query's intermediate results to disks when it's consuming too much memory.
+## Set it to false if you want query to abort immediately in such condition.
+#kylin.storage.partition.aggr-spill-enabled=true
+#
+## The maximum number of bytes each coprocessor is allowed to scan.
+## To allow arbitrary large scan, you can set it to 0.
+#kylin.storage.partition.max-scan-bytes=3221225472
+#
+## The default coprocessor timeout is (hbase.rpc.timeout * 0.9) / 1000 seconds,
+## You can set it to a smaller value. 0 means use default.
+## kylin.storage.hbase.coprocessor-timeout-seconds=0
+#
+## clean real storage after delete operation
+## if you want to delete the real storage like htable of deleting segment, you can set it to true
+#kylin.storage.clean-after-delete-operation=false
+#
+#### JOB ###
+#
+## Max job retry on error, default 0: no retry
+#kylin.job.retry=0
+#
+## Max count of concurrent jobs running
+#kylin.job.max-concurrent-jobs=10
+#
+## The percentage of the sampling, default 100%
+#kylin.job.sampling-percentage=100
+#
+## If true, will send email notification on job complete
+##kylin.job.notification-enabled=true
+##kylin.job.notification-mail-enable-starttls=true
+##kylin.job.notification-mail-host=smtp.office365.com
+##kylin.job.notification-mail-port=587
+##kylin.job.notification-mail-username=kylin@example.com
+##kylin.job.notification-mail-password=mypassword
+##kylin.job.notification-mail-sender=kylin@example.com
+kylin.job.scheduler.provider.100=org.apache.kylin.job.impl.curator.CuratorScheduler
+kylin.job.scheduler.default=100
+#
+#### ENGINE ###
+#
+## Time interval to check hadoop job status
+#kylin.engine.mr.yarn-check-interval-seconds=10
+#
+#kylin.engine.mr.reduce-input-mb=500
+#
+#kylin.engine.mr.max-reducer-number=500
+#
+#kylin.engine.mr.mapper-input-rows=1000000
+#
+## Enable dictionary building in MR reducer
+#kylin.engine.mr.build-dict-in-reducer=true
+#
+## Number of reducers for fetching UHC column distinct values
+#kylin.engine.mr.uhc-reducer-count=3
+#
+## Whether using an additional step to build UHC dictionary
+#kylin.engine.mr.build-uhc-dict-in-additional-step=false
+#
+#
+#### CUBE | DICTIONARY ###
+#
+#kylin.cube.cuboid-scheduler=org.apache.kylin.cube.cuboid.DefaultCuboidScheduler
+#kylin.cube.segment-advisor=org.apache.kylin.cube.CubeSegmentAdvisor
+#
+## 'auto', 'inmem', 'layer' or 'random' for testing 
+#kylin.cube.algorithm=layer
+#
+## A smaller threshold prefers layer, a larger threshold prefers in-mem
+#kylin.cube.algorithm.layer-or-inmem-threshold=7
+#
+## auto use inmem algorithm:
+## 1, cube planner optimize job
+## 2, no source record
+#kylin.cube.algorithm.inmem-auto-optimize=true
+#
+#kylin.cube.aggrgroup.max-combination=32768
+#
+#kylin.snapshot.max-mb=300
+#
+#kylin.cube.cubeplanner.enabled=true
+#kylin.cube.cubeplanner.enabled-for-existing-cube=true
+#kylin.cube.cubeplanner.expansion-threshold=15.0
+#kylin.cube.cubeplanner.recommend-cache-max-size=200
+#kylin.cube.cubeplanner.mandatory-rollup-threshold=1000
+#kylin.cube.cubeplanner.algorithm-threshold-greedy=8
+#kylin.cube.cubeplanner.algorithm-threshold-genetic=23
+#
+#
+#### QUERY ###
+#
+## Controls the maximum number of bytes a query is allowed to scan storage.
+## The default value 0 means no limit.
+## The counterpart kylin.storage.partition.max-scan-bytes sets the maximum per coprocessor.
+#kylin.query.max-scan-bytes=0
+#
+kylin.query.cache-enabled=true
+#
+## Controls extras properties for Calcite jdbc driver
+## all extras properties should be under the prefix "kylin.query.calcite.extras-props."
+## case sensitive, default: true, to enable case insensitive set it to false
+## @see org.apache.calcite.config.CalciteConnectionProperty.CASE_SENSITIVE
+#kylin.query.calcite.extras-props.caseSensitive=true
+## how to handle unquoted identifiers, default: TO_UPPER, available options: UNCHANGED, TO_UPPER, TO_LOWER
+## @see org.apache.calcite.config.CalciteConnectionProperty.UNQUOTED_CASING
+#kylin.query.calcite.extras-props.unquotedCasing=TO_UPPER
+## quoting method, default: DOUBLE_QUOTE, available options: DOUBLE_QUOTE, BACK_TICK, BRACKET
+## @see org.apache.calcite.config.CalciteConnectionProperty.QUOTING
+#kylin.query.calcite.extras-props.quoting=DOUBLE_QUOTE
+## change SqlConformance from DEFAULT to LENIENT to enable group by ordinal
+## @see org.apache.calcite.sql.validate.SqlConformance.SqlConformanceEnum
+#kylin.query.calcite.extras-props.conformance=LENIENT
+#
+## TABLE ACL
+#kylin.query.security.table-acl-enabled=true
+#
+## Usually should not modify this
+#kylin.query.interceptors=org.apache.kylin.rest.security.TableInterceptor
+#
+#kylin.query.escape-default-keyword=false
+#
+## Usually should not modify this
+#kylin.query.transformers=org.apache.kylin.query.util.DefaultQueryTransformer,org.apache.kylin.query.util.KeywordDefaultDirtyHack
+#
+#### SECURITY ###
+#
+## Spring security profile, options: testing, ldap, saml
+## with "testing" profile, user can use pre-defined name/pwd like ADMIN/KYLIN to login
+#kylin.security.profile=testing
+#
+## Admin roles in LDAP, for ldap and saml
+#kylin.security.acl.admin-role=admin
+#
+## LDAP authentication configuration
+#kylin.security.ldap.connection-server=ldap://ldap_server:389
+#kylin.security.ldap.connection-username=
+#kylin.security.ldap.connection-password=
+#
+## LDAP user account directory;
+#kylin.security.ldap.user-search-base=
+#kylin.security.ldap.user-search-pattern=
+#kylin.security.ldap.user-group-search-base=
+#kylin.security.ldap.user-group-search-filter=(|(member={0})(memberUid={1}))
+#
+## LDAP service account directory
+#kylin.security.ldap.service-search-base=
+#kylin.security.ldap.service-search-pattern=
+#kylin.security.ldap.service-group-search-base=
+#
+### SAML configurations for SSO
+## SAML IDP metadata file location
+#kylin.security.saml.metadata-file=classpath:sso_metadata.xml
+#kylin.security.saml.metadata-entity-base-url=https://hostname/kylin
+#kylin.security.saml.keystore-file=classpath:samlKeystore.jks
+#kylin.security.saml.context-scheme=https
+#kylin.security.saml.context-server-name=hostname
+#kylin.security.saml.context-server-port=443
+#kylin.security.saml.context-path=/kylin
+#
+#### SPARK ENGINE CONFIGS ###
+#
+## Hadoop conf folder, will export this as "HADOOP_CONF_DIR" to run spark-submit
+## This must contain site xmls of core, yarn, hive, and hbase in one folder
+##kylin.env.hadoop-conf-dir=/etc/hadoop/conf
+#
+## Estimate the RDD partition numbers
+#kylin.engine.spark.rdd-partition-cut-mb=10
+#
+## Minimal partition numbers of rdd
+#kylin.engine.spark.min-partition=1
+#
+## Max partition numbers of rdd
+#kylin.engine.spark.max-partition=5000
+#
+## Spark conf (default is in spark/conf/spark-defaults.conf)
+#kylin.engine.spark-conf.spark.master=yarn
+##kylin.engine.spark-conf.spark.submit.deployMode=cluster
+#kylin.engine.spark-conf.spark.yarn.queue=default
+#kylin.engine.spark-conf.spark.driver.memory=2G
+#kylin.engine.spark-conf.spark.executor.memory=4G
+#kylin.engine.spark-conf.spark.executor.instances=40
+#kylin.engine.spark-conf.spark.yarn.executor.memoryOverhead=1024
+#kylin.engine.spark-conf.spark.shuffle.service.enabled=true
+#kylin.engine.spark-conf.spark.eventLog.enabled=true
+#kylin.engine.spark-conf.spark.eventLog.dir=hdfs\:///kylin/spark-history
+#kylin.engine.spark-conf.spark.history.fs.logDirectory=hdfs\:///kylin/spark-history
+#kylin.engine.spark-conf.spark.hadoop.yarn.timeline-service.enabled=false
+#
+#### Spark conf for specific job
+#kylin.engine.spark-conf-mergedict.spark.executor.memory=6G
+#kylin.engine.spark-conf-mergedict.spark.memory.fraction=0.2
+#
+## manually upload spark-assembly jar to HDFS and then set this property will avoid repeatedly uploading jar at runtime
+##kylin.engine.spark-conf.spark.yarn.archive=hdfs://namenode:8020/kylin/spark/spark-libs.jar
+##kylin.engine.spark-conf.spark.io.compression.codec=org.apache.spark.io.SnappyCompressionCodec
+#
+## uncomment for HDP
+##kylin.engine.spark-conf.spark.driver.extraJavaOptions=-Dhdp.version=current
+##kylin.engine.spark-conf.spark.yarn.am.extraJavaOptions=-Dhdp.version=current
+##kylin.engine.spark-conf.spark.executor.extraJavaOptions=-Dhdp.version=current
+#
+#
+#### QUERY PUSH DOWN ###
+#
+##kylin.query.pushdown.runner-class-name=org.apache.kylin.query.adhoc.PushDownRunnerJdbcImpl
+#
+##kylin.query.pushdown.update-enabled=false
+##kylin.query.pushdown.jdbc.url=jdbc:hive2://sandbox:10000/default
+##kylin.query.pushdown.jdbc.driver=org.apache.hive.jdbc.HiveDriver
+##kylin.query.pushdown.jdbc.username=hive
+##kylin.query.pushdown.jdbc.password=
+#
+##kylin.query.pushdown.jdbc.pool-max-total=8
+##kylin.query.pushdown.jdbc.pool-max-idle=8
+##kylin.query.pushdown.jdbc.pool-min-idle=0
+#
+#### JDBC Data Source
+##kylin.source.jdbc.connection-url=
+##kylin.source.jdbc.driver=
+##kylin.source.jdbc.dialect=
+##kylin.source.jdbc.user=
+##kylin.source.jdbc.pass=
+##kylin.source.jdbc.sqoop-home=
+##kylin.source.jdbc.filed-delimiter=|
+#
+#### Livy with Kylin
+##kylin.engine.livy-conf.livy-enabled=false
+##kylin.engine.livy-conf.livy-url=http://LivyHost:8998
+##kylin.engine.livy-conf.livy-key.file=hdfs:///path-to-kylin-job-jar
+##kylin.engine.livy-conf.livy-arr.jars=hdfs:///path-to-hadoop-dependency-jar
+#
+#
+#### Realtime OLAP ###
+#
+## Where should local segment cache located, for absolute path, the real path will be ${KYLIN_HOME}/${kylin.stream.index.path}
+#kylin.stream.index.path=stream_index
+#
+## The timezone for Derived Time Column like hour_start, try set to GMT+N, please check detail at KYLIN-4010
+#kylin.stream.event.timezone=
+#
+## Debug switch for print realtime global dict encode information, please check detail at KYLIN-4141
+#kylin.stream.print-realtime-dict-enabled=false
+#
+## Should enable latest coordinator, please check detail at KYLIN-4167
+#kylin.stream.new.coordinator-enabled=true
+#
+## In which way should we collect receiver's metrics info
+##kylin.stream.metrics.option=console/csv/jmx
+#
+## When enabling a streaming cube, whether to consume from the earliest or latest offset
+#kylin.stream.consume.offsets.latest=true
+#
+## The parallelism of scan in receiver side
+#kylin.stream.receiver.use-threads-per-query=8
+#
+## How coordinator/receiver register itself into StreamMetadata, there are three option:
+## 1. hostname:port, then kylin will set the config ip and port as the currentNode;
+## 2. port, then kylin will get the node's hostname and append port as the currentNode;
+## 3. not set, then kylin will get the node hostname address and set the hostname and defaultPort(7070 for coordinator or 9090 for receiver) as the currentNode.
+##kylin.stream.node=
+#
+## Auto resubmit after job be discarded
+#kylin.stream.auto-resubmit-after-discard-enabled=true
diff --git a/kubernetes/config/production/kylin-job/kylin_hive_conf.xml b/kubernetes/config/production/kylin-job/kylin_hive_conf.xml
new file mode 100644
index 0000000..f01d08e
--- /dev/null
+++ b/kubernetes/config/production/kylin-job/kylin_hive_conf.xml
@@ -0,0 +1,102 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+    <property>
+        <name>dfs.replication</name>
+        <value>2</value>
+        <description>Block replication</description>
+    </property>
+
+    <property>
+        <name>hive.exec.compress.output</name>
+        <value>true</value>
+        <description>Enable compress</description>
+    </property>
+
+    <property>
+        <name>hive.auto.convert.join</name>
+        <value>true</value>
+        <description>Enables the optimization about converting common join into mapjoin</description>
+    </property>
+
+    <property>
+        <name>hive.auto.convert.join.noconditionaltask</name>
+        <value>true</value>
+        <description>enable map-side join</description>
+    </property>
+
+    <property>
+        <name>hive.auto.convert.join.noconditionaltask.size</name>
+        <value>100000000</value>
+        <description>enable map-side join</description>
+    </property>
+
+    <!--
+    The default map outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.map.output.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description></description>
+    </property>
+    -->
+    <!--
+    The default job outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description></description>
+    </property>
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.type</name>
+        <value>BLOCK</value>
+        <description>The compression type to use for job outputs</description>
+    </property>
+
+    -->
+    <property>
+        <name>mapreduce.job.split.metainfo.maxsize</name>
+        <value>-1</value>
+        <description>The maximum permissible size of the split metainfo file.
+            The JobTracker won't attempt to read split metainfo files bigger than
+            the configured value. No limits if set to -1.
+        </description>
+    </property>
+
+    <property>
+        <name>hive.stats.autogather</name>
+        <value>true</value>
+        <description>Collect statistics for newly created intermediate table</description>
+    </property>
+
+    <property>
+        <name>hive.merge.mapfiles</name>
+        <value>false</value>
+        <description>Disable Hive's auto merge</description>
+    </property>
+
+    <property>
+        <name>hive.merge.mapredfiles</name>
+        <value>false</value>
+        <description>Disable Hive's auto merge</description>
+    </property>
+</configuration>
diff --git a/kubernetes/config/production/kylin-job/kylin_job_conf.xml b/kubernetes/config/production/kylin-job/kylin_job_conf.xml
new file mode 100644
index 0000000..17a9145
--- /dev/null
+++ b/kubernetes/config/production/kylin-job/kylin_job_conf.xml
@@ -0,0 +1,88 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+
+    <property>
+        <name>mapreduce.job.split.metainfo.maxsize</name>
+        <value>-1</value>
+        <description>The maximum permissible size of the split metainfo file.
+            The JobTracker won't attempt to read split metainfo files bigger than
+            the configured value. No limits if set to -1.
+        </description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.output.compress</name>
+        <value>true</value>
+        <description>Compress map outputs</description>
+    </property>
+
+    <!--
+    The default map outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.map.output.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for map outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress</name>
+        <value>true</value>
+        <description>Compress the output of a MapReduce job</description>
+    </property>
+    <!--
+    The default job outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for job outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.type</name>
+        <value>BLOCK</value>
+        <description>The compression type to use for job outputs</description>
+    </property>
+
+
+    <property>
+        <name>mapreduce.job.max.split.locations</name>
+        <value>2000</value>
+        <description>No description</description>
+    </property>
+
+    <property>
+        <name>dfs.replication</name>
+        <value>2</value>
+        <description>Block replication</description>
+    </property>
+
+    <property>
+        <name>mapreduce.task.timeout</name>
+        <value>3600000</value>
+        <description>Set task timeout to 1 hour</description>
+    </property>
+
+</configuration>
diff --git a/kubernetes/config/production/kylin-job/kylin_job_conf_cube_merge.xml b/kubernetes/config/production/kylin-job/kylin_job_conf_cube_merge.xml
new file mode 100644
index 0000000..79365ad
--- /dev/null
+++ b/kubernetes/config/production/kylin-job/kylin_job_conf_cube_merge.xml
@@ -0,0 +1,104 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+    <property>
+        <name>mapreduce.job.split.metainfo.maxsize</name>
+        <value>-1</value>
+        <description>The maximum permissible size of the split metainfo file.
+            The JobTracker won't attempt to read split metainfo files bigger than
+            the configured value. No limits if set to -1.
+        </description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.output.compress</name>
+        <value>true</value>
+        <description>Compress map outputs</description>
+    </property>
+
+    <!--
+    The default map outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.map.output.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for map outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress</name>
+        <value>true</value>
+        <description>Compress the output of a MapReduce job</description>
+    </property>
+    <!--
+    The default job outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for job outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.type</name>
+        <value>BLOCK</value>
+        <description>The compression type to use for job outputs</description>
+    </property>
+
+    <property>
+        <name>mapreduce.job.max.split.locations</name>
+        <value>2000</value>
+        <description>No description</description>
+    </property>
+
+    <property>
+        <name>dfs.replication</name>
+        <value>2</value>
+        <description>Block replication</description>
+    </property>
+
+    <property>
+        <name>mapreduce.task.timeout</name>
+        <value>7200000</value>
+        <description>Set task timeout to 2 hours</description>
+    </property>
+
+    <!--Additional config for cube merge job, giving more memory -->
+    <property>
+        <name>mapreduce.map.memory.mb</name>
+        <value>3072</value>
+        <description></description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.java.opts</name>
+        <value>-Xmx2700m -XX:OnOutOfMemoryError='kill -9 %p'</value>
+        <description></description>
+    </property>
+
+    <property>
+        <name>mapreduce.task.io.sort.mb</name>
+        <value>200</value>
+        <description></description>
+    </property>
+</configuration>
diff --git a/kubernetes/config/production/kylin-job/kylin_job_conf_inmem.xml b/kubernetes/config/production/kylin-job/kylin_job_conf_inmem.xml
new file mode 100644
index 0000000..ddda4dd
--- /dev/null
+++ b/kubernetes/config/production/kylin-job/kylin_job_conf_inmem.xml
@@ -0,0 +1,111 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+    <property>
+        <name>mapreduce.job.is-mem-hungry</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>mapreduce.job.split.metainfo.maxsize</name>
+        <value>-1</value>
+        <description>The maximum permissible size of the split metainfo file.
+            The JobTracker won't attempt to read split metainfo files bigger than
+            the configured value. No limits if set to -1.
+        </description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.output.compress</name>
+        <value>true</value>
+        <description>Compress map outputs</description>
+    </property>
+
+    <!--
+    The default map outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.map.output.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for map outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress</name>
+        <value>true</value>
+        <description>Compress the output of a MapReduce job</description>
+    </property>
+    <!--
+    The default job outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for job outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.type</name>
+        <value>BLOCK</value>
+        <description>The compression type to use for job outputs</description>
+    </property>
+
+
+    <property>
+        <name>mapreduce.job.max.split.locations</name>
+        <value>2000</value>
+        <description>No description</description>
+    </property>
+
+    <property>
+        <name>dfs.replication</name>
+        <value>2</value>
+        <description>Block replication</description>
+    </property>
+
+    <property>
+        <name>mapreduce.task.timeout</name>
+        <value>7200000</value>
+        <description>Set task timeout to 2 hours</description>
+    </property>
+
+    <!--Additional config for in-mem cubing, giving mapper more memory -->
+    <property>
+        <name>mapreduce.map.memory.mb</name>
+        <value>3072</value>
+        <description></description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.java.opts</name>
+        <value>-Xmx2700m -XX:OnOutOfMemoryError='kill -9 %p'</value>
+        <description></description>
+    </property>
+
+    <property>
+        <name>mapreduce.task.io.sort.mb</name>
+        <value>200</value>
+        <description></description>
+    </property>
+
+</configuration>
diff --git a/kubernetes/config/production/kylin-job/setenv-tool.sh b/kubernetes/config/production/kylin-job/setenv-tool.sh
new file mode 100644
index 0000000..487b5ef
--- /dev/null
+++ b/kubernetes/config/production/kylin-job/setenv-tool.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# source me
+
+# (if you're deploying KYLIN on a powerful server and want to replace the default conservative settings)
+# uncomment the following for it to take effect
+export KYLIN_JVM_SETTINGS="-Xms1024M -Xmx4096M -Xss1024K -XX:MaxPermSize=512M -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.$$ -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=64M"
+
+# Newer versions of glibc use an arena memory allocator that causes virtual
+# memory usage to explode. Tune the variable down to prevent vmem explosion.
+# See HADOOP-7154.
+export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
+
+# export KYLIN_JVM_SETTINGS="-Xms16g -Xmx16g -XX:MaxPermSize=512m -XX:NewSize=3g -XX:MaxNewSize=3g -XX:SurvivorRatio=4 -XX:+CMSClassUnloadingEnabled -XX:+CMSParallelRemarkEnabled -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:+DisableExplicitGC -XX:+HeapDumpOnOutOfMemoryError -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.$$ -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFi [...]
+
+# uncomment the following for it to take effect (the values need adjusting to fit your env)
+# export KYLIN_DEBUG_SETTINGS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
+
+# when running on HDP, try to determine the software stack version and set the hdp.version JVM property
+if [[ -d "/usr/hdp/current/hadoop-client" ]]
+then
+   export KYLIN_EXTRA_START_OPTS="-Dhdp.version=`ls -l /usr/hdp/current/hadoop-client | awk -F'/' '{print $8}'`"
+   # attempt to locate JVM native libraries and set corresponding property
+   if [[ -d "/usr/hdp/current/hadoop-client/lib/native" ]]
+   then
+      export KYLIN_LD_LIBRARY_SETTINGS="-Djava.library.path=/usr/hdp/current/hadoop-client/lib/native"
+   fi
+else
+   export KYLIN_EXTRA_START_OPTS=""
+   # uncomment the following line to set JVM native library path, the values need to reflect your environment and hardware architecture
+   # export KYLIN_LD_LIBRARY_SETTINGS="-Djava.library.path=/apache/hadoop/lib/native/Linux-amd64-64"
+fi
+
+if [ ! -z "${KYLIN_JVM_SETTINGS}" ]
+then
+    verbose "KYLIN_JVM_SETTINGS is ${KYLIN_JVM_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_JVM_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_JVM_SETTINGS is not set, using default jvm settings: ${KYLIN_JVM_SETTINGS}"
+fi
+
+if [ ! -z "${KYLIN_DEBUG_SETTINGS}" ]
+then
+    verbose "KYLIN_DEBUG_SETTINGS is ${KYLIN_DEBUG_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_DEBUG_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_DEBUG_SETTINGS is not set, will not enable remote debugging"
+fi
+
+if [ ! -z "${KYLIN_LD_LIBRARY_SETTINGS}" ]
+then
+    verbose "KYLIN_LD_LIBRARY_SETTINGS is ${KYLIN_LD_LIBRARY_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_LD_LIBRARY_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_LD_LIBRARY_SETTINGS is not set, it is okay unless you want to specify your own native path"
+fi
diff --git a/kubernetes/config/production/kylin-job/setenv.sh b/kubernetes/config/production/kylin-job/setenv.sh
new file mode 100644
index 0000000..fa88769
--- /dev/null
+++ b/kubernetes/config/production/kylin-job/setenv.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# source me
+
+# (if you're deploying KYLIN on a powerful server and want to replace the default conservative settings)
+# uncomment the following for it to take effect
+export KYLIN_JVM_SETTINGS="-Xms1024M -Xmx2048M -Xss1024K -XX:MaxPermSize=512M -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.%p -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=64M"
+
+# Newer versions of glibc use an arena memory allocator that causes virtual
+# memory usage to explode. Tune the variable down to prevent vmem explosion.
+# See HADOOP-7154.
+export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
+
+# export KYLIN_JVM_SETTINGS="-Xms16g -Xmx16g -XX:MaxPermSize=512m -XX:NewSize=3g -XX:MaxNewSize=3g -XX:SurvivorRatio=4 -XX:+CMSClassUnloadingEnabled -XX:+CMSParallelRemarkEnabled -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:+DisableExplicitGC -XX:+HeapDumpOnOutOfMemoryError -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.%p -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFi [...]
+
+# uncomment the following for it to take effect (the values need adjusting to fit your env)
+# export KYLIN_DEBUG_SETTINGS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
+
+# when running on HDP, try to determine the software stack version and set the hdp.version JVM property
+if [[ -d "/usr/hdp/current/hadoop-client" ]]
+then
+   export KYLIN_EXTRA_START_OPTS="-Dhdp.version=`ls -l /usr/hdp/current/hadoop-client | awk -F'/' '{print $8}'`"
+   # attempt to locate JVM native libraries and set corresponding property
+   if [[ -d "/usr/hdp/current/hadoop-client/lib/native" ]]
+   then
+      export KYLIN_LD_LIBRARY_SETTINGS="-Djava.library.path=/usr/hdp/current/hadoop-client/lib/native"
+   fi
+else
+   export KYLIN_EXTRA_START_OPTS=""
+   # uncomment the following line to set JVM native library path, the values need to reflect your environment and hardware architecture
+   # export KYLIN_LD_LIBRARY_SETTINGS="-Djava.library.path=/apache/hadoop/lib/native/Linux-amd64-64"
+fi
+
+if [ ! -z "${KYLIN_JVM_SETTINGS}" ]
+then
+    verbose "KYLIN_JVM_SETTINGS is ${KYLIN_JVM_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_JVM_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_JVM_SETTINGS is not set, using default jvm settings: ${KYLIN_JVM_SETTINGS}"
+fi
+
+if [ ! -z "${KYLIN_DEBUG_SETTINGS}" ]
+then
+    verbose "KYLIN_DEBUG_SETTINGS is ${KYLIN_DEBUG_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_DEBUG_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_DEBUG_SETTINGS is not set, will not enable remote debugging"
+fi
+
+if [ ! -z "${KYLIN_LD_LIBRARY_SETTINGS}" ]
+then
+    verbose "KYLIN_LD_LIBRARY_SETTINGS is ${KYLIN_LD_LIBRARY_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_LD_LIBRARY_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_LD_LIBRARY_SETTINGS is not set, it is okay unless you want to specify your own native path"
+fi
diff --git a/kubernetes/config/production/kylin-more/applicationContext.xml b/kubernetes/config/production/kylin-more/applicationContext.xml
new file mode 100644
index 0000000..5397044
--- /dev/null
+++ b/kubernetes/config/production/kylin-more/applicationContext.xml
@@ -0,0 +1,124 @@
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<beans xmlns="http://www.springframework.org/schema/beans"
+       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xmlns:context="http://www.springframework.org/schema/context"
+       xmlns:mvc="http://www.springframework.org/schema/mvc"
+       xmlns:aop="http://www.springframework.org/schema/aop"
+       xmlns:cache="http://www.springframework.org/schema/cache"
+       xmlns:p="http://www.springframework.org/schema/p"
+       xsi:schemaLocation="http://www.springframework.org/schema/beans
+    http://www.springframework.org/schema/beans/spring-beans-4.3.xsd
+    http://www.springframework.org/schema/context
+    http://www.springframework.org/schema/context/spring-context-4.3.xsd
+
+
+    http://www.springframework.org/schema/mvc
+    http://www.springframework.org/schema/mvc/spring-mvc-4.3.xsd
+    http://www.springframework.org/schema/aop
+    http://www.springframework.org/schema/aop/spring-aop-4.3.xsd
+    http://www.springframework.org/schema/cache
+    http://www.springframework.org/schema/cache/spring-cache.xsd">
+
+    <description>Kylin Rest Service</description>
+    <context:annotation-config/>
+    <mvc:annotation-driven/>
+    <aop:aspectj-autoproxy/>
+
+    <bean class="org.apache.kylin.rest.init.InitialTaskManager"/>
+
+    <context:component-scan base-package="org.apache.kylin.rest"/>
+
+    <bean class="org.apache.kylin.rest.security.PasswordPlaceholderConfigurer">
+        <property name="ignoreResourceNotFound" value="true"/>
+    </bean>
+
+
+    <!-- Rest service binding -->
+    <bean class="org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerMapping"/>
+
+    <bean id="mappingJacksonHttpMessageConverter"
+          class="org.springframework.http.converter.json.MappingJackson2HttpMessageConverter"/>
+    <bean id="stringHttpMessageConverter"
+          class="org.springframework.http.converter.StringHttpMessageConverter"/>
+    <bean id="formHttpMessageConverter"
+          class="org.springframework.http.converter.FormHttpMessageConverter"/>
+
+    <bean class="org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter">
+        <property name="messageConverters">
+            <list>
+                <ref bean="mappingJacksonHttpMessageConverter"/>
+                <ref bean="stringHttpMessageConverter"/>
+                <ref bean="formHttpMessageConverter"/>
+            </list>
+        </property>
+    </bean>
+
+    <bean class="org.springframework.web.servlet.view.ContentNegotiatingViewResolver">
+        <property name="viewResolvers">
+            <list>
+                <bean class="org.springframework.web.servlet.view.BeanNameViewResolver"/>
+                <bean class="org.springframework.web.servlet.view.InternalResourceViewResolver">
+                    <!-- <property name="prefix" value="/WEB-INF/jsp/"/> -->
+                    <property name="suffix" value=".jsp"/>
+                </bean>
+            </list>
+        </property>
+        <property name="defaultViews">
+            <list>
+                <bean class="org.springframework.web.servlet.view.json.MappingJackson2JsonView"/>
+            </list>
+        </property>
+    </bean>
+    <!-- Rest service binding -->
+
+    <!-- Cache Config -->
+    <cache:annotation-driven/>
+
+    <beans profile="ldap,saml">
+        <bean id="ehcache"
+              class="org.springframework.cache.ehcache.EhCacheManagerFactoryBean"
+              p:configLocation="classpath:ehcache.xml" p:shared="true"/>
+
+        <bean id="cacheManager" class="org.springframework.cache.ehcache.EhCacheCacheManager"
+              p:cacheManager-ref="ehcache"/>
+    </beans>
+    <beans profile="testing">
+        <!--
+        <bean id="ehcache"
+              class="org.springframework.cache.ehcache.EhCacheManagerFactoryBean"
+              p:configLocation="classpath:ehcache-test.xml" p:shared="true"/>
+
+        <bean id="cacheManager" class="org.springframework.cache.ehcache.EhCacheCacheManager"
+              p:cacheManager-ref="ehcache"/>
+        -->
+
+
+        <bean id="ehcache" class="org.springframework.cache.ehcache.EhCacheManagerFactoryBean"
+              p:configLocation="classpath:ehcache-test.xml" p:shared="true"/>
+
+        <bean id="remoteCacheManager" class="org.apache.kylin.cache.cachemanager.MemcachedCacheManager"/>
+        <bean id="localCacheManager" class="org.apache.kylin.cache.cachemanager.InstrumentedEhCacheCacheManager"
+              p:cacheManager-ref="ehcache"/>
+        <bean id="cacheManager" class="org.apache.kylin.cache.cachemanager.RemoteLocalFailOverCacheManager"/>
+
+        <bean id="memcachedCacheConfig" class="org.apache.kylin.cache.memcached.MemcachedCacheConfig">
+            <property name="timeout" value="500"/>
+            <property name="hosts" value="${kylin.cache.memcached.hosts}"/>
+        </bean>
+
+    </beans>
+
+</beans>
\ No newline at end of file
diff --git a/kubernetes/config/production/kylin-more/ehcache-test.xml b/kubernetes/config/production/kylin-more/ehcache-test.xml
new file mode 100644
index 0000000..5bd4d13
--- /dev/null
+++ b/kubernetes/config/production/kylin-more/ehcache-test.xml
@@ -0,0 +1,30 @@
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<ehcache maxBytesLocalHeap="256M">
+    <cache name="StorageCache"
+           eternal="false"
+           timeToIdleSeconds="86400"
+           memoryStoreEvictionPolicy="LRU"
+            >
+        <persistence strategy="none"/>
+    </cache>
+    <cache name="ExceptionQueryCache"
+           eternal="false"
+           timeToIdleSeconds="86400"
+           memoryStoreEvictionPolicy="LRU"
+            >
+        <persistence strategy="none"/>
+    </cache>
+</ehcache>
diff --git a/kubernetes/config/production/kylin-more/ehcache.xml b/kubernetes/config/production/kylin-more/ehcache.xml
new file mode 100644
index 0000000..c9efc13
--- /dev/null
+++ b/kubernetes/config/production/kylin-more/ehcache.xml
@@ -0,0 +1,30 @@
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<ehcache maxBytesLocalHeap="2048M">
+    <cache name="StorageCache"
+           eternal="false"
+           timeToIdleSeconds="86400"
+           memoryStoreEvictionPolicy="LRU"
+            >
+        <persistence strategy="none"/>
+    </cache>
+    <cache name="ExceptionQueryCache"
+           eternal="false"
+           timeToIdleSeconds="86400"
+           memoryStoreEvictionPolicy="LRU"
+            >
+        <persistence strategy="none"/>
+    </cache>
+</ehcache>
diff --git a/kubernetes/config/production/kylin-more/kylinMetrics.xml b/kubernetes/config/production/kylin-more/kylinMetrics.xml
new file mode 100644
index 0000000..843fb91
--- /dev/null
+++ b/kubernetes/config/production/kylin-more/kylinMetrics.xml
@@ -0,0 +1,86 @@
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<beans xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xmlns="http://www.springframework.org/schema/beans"
+       xsi:schemaLocation="http://www.springframework.org/schema/beans
+            http://www.springframework.org/schema/beans/spring-beans-3.1.xsd">
+
+    <description>Kylin Metrics Related Configuration (SystemCube)</description>
+
+    <!-- A Reservoir which doesn't stage metrics messages at all; it emits them immediately. Maybe good for debug purposes. -->
+    <bean id="instantReservoir" class="org.apache.kylin.metrics.lib.impl.InstantReservoir"/>
+
+    <!-- A Reservoir which stages metrics messages in memory and emits them at a fixed rate. -->
+    <bean id="blockingReservoir" class="org.apache.kylin.metrics.lib.impl.BlockingReservoir">
+        <!-- minReportSize: the Reservoir will only try to write messages once the count of staged messages exceeds minReportSize -->
+        <constructor-arg index="0">
+            <value>100</value>
+        </constructor-arg>
+
+        <!-- maxReportSize: the maximum number of messages reported at one time -->
+        <constructor-arg index="1">
+            <value>500</value>
+        </constructor-arg>
+
+        <!-- minReportTime: the minimum duration (in minutes) between two report actions -->
+        <constructor-arg index="2">
+            <value>10</value>
+        </constructor-arg>
+    </bean>
+
+    <bean id="hiveSink" class="org.apache.kylin.metrics.lib.impl.hive.HiveSink"/>
+
+    <bean id="kafkaSink" class="org.apache.kylin.metrics.lib.impl.kafka.KafkaSink"/>
+
+    <bean id="initMetricsManager" class="org.springframework.beans.factory.config.MethodInvokingFactoryBean">
+        <property name="targetClass" value="org.apache.kylin.metrics.MetricsManager"/>
+        <property name="targetMethod" value="initMetricsManager"/>
+        <property name="arguments">
+            <list>
+                <ref bean="hiveSink"/>
+                <map key-type="org.apache.kylin.metrics.lib.ActiveReservoir" value-type="java.util.List">
+                    <!--
+                    <entry key-ref="instantReservoir">
+                        <list>
+                            <bean class="org.apache.kylin.common.util.Pair">
+                                <property name="first"
+                                          value="org.apache.kylin.metrics.lib.impl.kafka.KafkaReservoirReporter"/>
+                                <property name="second">
+                                    <props>
+                                        <prop key="bootstrap.servers">sandbox:9092</prop>
+                                    </props>
+                                </property>
+                            </bean>
+                        </list>
+                    </entry>
+                    -->
+                    <entry key-ref="blockingReservoir">
+                        <list>
+                            <bean class="org.apache.kylin.common.util.Pair">
+                                <property name="first"
+                                          value="org.apache.kylin.metrics.lib.impl.hive.HiveReservoirReporter"/>
+                                <property name="second">
+                                    <props>
+                                    </props>
+                                </property>
+                            </bean>
+                        </list>
+                    </entry>
+                </map>
+            </list>
+        </property>
+    </bean>
+
+</beans>
\ No newline at end of file
diff --git a/kubernetes/config/production/kylin-more/kylinSecurity.xml b/kubernetes/config/production/kylin-more/kylinSecurity.xml
new file mode 100644
index 0000000..6116433
--- /dev/null
+++ b/kubernetes/config/production/kylin-more/kylinSecurity.xml
@@ -0,0 +1,634 @@
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<beans xmlns="http://www.springframework.org/schema/beans"
+       xmlns:scr="http://www.springframework.org/schema/security"
+       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xmlns:context="http://www.springframework.org/schema/context"
+       xmlns:util="http://www.springframework.org/schema/util" xsi:schemaLocation="http://www.springframework.org/schema/beans
+	http://www.springframework.org/schema/beans/spring-beans-4.3.xsd
+	http://www.springframework.org/schema/security
+	http://www.springframework.org/schema/security/spring-security-4.2.xsd
+	http://www.springframework.org/schema/util
+	http://www.springframework.org/schema/util/spring-util-4.3.xsd
+    http://www.springframework.org/schema/context
+    http://www.springframework.org/schema/context/spring-context.xsd">
+
+    <scr:global-method-security pre-post-annotations="enabled">
+        <scr:expression-handler ref="expressionHandler"/>
+    </scr:global-method-security>
+
+
+    <!-- acl config -->
+    <bean id="aclPermissionFactory" class="org.apache.kylin.rest.security.AclPermissionFactory"/>
+
+    <bean id="expressionHandler"
+          class="org.springframework.security.access.expression.method.DefaultMethodSecurityExpressionHandler">
+        <property name="permissionEvaluator" ref="permissionEvaluator"/>
+    </bean>
+
+    <bean id="permissionEvaluator" class="org.apache.kylin.rest.security.KylinAclPermissionEvaluator">
+        <constructor-arg ref="aclService"/>
+        <constructor-arg ref="aclPermissionFactory"/>
+    </bean>
+
+    <bean id="aclAuthorizationStrategy"
+          class="org.springframework.security.acls.domain.AclAuthorizationStrategyImpl">
+        <constructor-arg>
+            <list>
+                <bean class="org.springframework.security.core.authority.SimpleGrantedAuthority">
+                    <constructor-arg value="ROLE_ADMIN"/>
+                </bean>
+                <bean class="org.springframework.security.core.authority.SimpleGrantedAuthority">
+                    <constructor-arg value="ROLE_ADMIN"/>
+                </bean>
+                <bean class="org.springframework.security.core.authority.SimpleGrantedAuthority">
+                    <constructor-arg value="ROLE_ADMIN"/>
+                </bean>
+            </list>
+        </constructor-arg>
+    </bean>
+
+    <bean id="auditLogger"
+          class="org.springframework.security.acls.domain.ConsoleAuditLogger"/>
+
+    <bean id="permissionGrantingStrategy"
+          class="org.springframework.security.acls.domain.DefaultPermissionGrantingStrategy">
+        <constructor-arg ref="auditLogger"/>
+    </bean>
+
+    <bean id="userService" class="org.apache.kylin.rest.service.KylinUserService"/>
+
+    <bean id="userGroupService" class="org.apache.kylin.rest.service.KylinUserGroupService"/>
+
+    <beans profile="ldap,saml">
+        <bean id="ldapSource"
+              class="org.springframework.security.ldap.DefaultSpringSecurityContextSource">
+            <constructor-arg value="${kylin.security.ldap.connection-server}"/>
+            <property name="userDn" value="${kylin.security.ldap.connection-username}"/>
+            <property name="password" value="${kylin.security.ldap.connection-password}"/>
+        </bean>
+
+        <bean id="kylinUserAuthProvider"
+              class="org.apache.kylin.rest.security.KylinAuthenticationProvider">
+            <constructor-arg>
+                <bean id="ldapUserAuthenticationProvider"
+                      class="org.springframework.security.ldap.authentication.LdapAuthenticationProvider">
+                    <constructor-arg>
+                        <bean class="org.springframework.security.ldap.authentication.BindAuthenticator">
+                            <constructor-arg ref="ldapSource"/>
+                            <property name="userSearch">
+                                <bean id="userSearch"
+                                      class="org.springframework.security.ldap.search.FilterBasedLdapUserSearch">
+                                    <constructor-arg index="0"
+                                                     value="${kylin.security.ldap.user-search-base}"/>
+                                    <constructor-arg index="1"
+                                                     value="${kylin.security.ldap.user-search-pattern}"/>
+                                    <constructor-arg index="2" ref="ldapSource"/>
+                                </bean>
+                            </property>
+                        </bean>
+                    </constructor-arg>
+                    <constructor-arg>
+                        <bean class="org.apache.kylin.rest.security.LDAPAuthoritiesPopulator">
+                            <constructor-arg index="0" ref="ldapSource"/>
+                            <constructor-arg index="1"
+                                             value="${kylin.security.ldap.user-group-search-base}"/>
+                            <constructor-arg index="2" value="${kylin.security.acl.admin-role}"/>
+                            <property name="groupSearchFilter" value="${kylin.security.ldap.user-group-search-filter}"/>
+                        </bean>
+                    </constructor-arg>
+                </bean>
+            </constructor-arg>
+        </bean>
+
+        <bean id="kylinServiceAccountAuthProvider"
+              class="org.apache.kylin.rest.security.KylinAuthenticationProvider">
+            <constructor-arg>
+                <bean id="ldapServiceAuthenticationProvider"
+                      class="org.springframework.security.ldap.authentication.LdapAuthenticationProvider">
+                    <constructor-arg>
+                        <bean class="org.springframework.security.ldap.authentication.BindAuthenticator">
+                            <constructor-arg ref="ldapSource"/>
+                            <property name="userSearch">
+                                <bean id="userSearch"
+                                      class="org.springframework.security.ldap.search.FilterBasedLdapUserSearch">
+                                    <constructor-arg index="0"
+                                                     value="${kylin.security.ldap.service-search-base}"/>
+                                    <constructor-arg index="1"
+                                                     value="${kylin.security.ldap.service-search-pattern}"/>
+                                    <constructor-arg index="2" ref="ldapSource"/>
+                                </bean>
+                            </property>
+                        </bean>
+                    </constructor-arg>
+                    <constructor-arg>
+                        <bean class="org.apache.kylin.rest.security.LDAPAuthoritiesPopulator">
+                            <constructor-arg index="0" ref="ldapSource"/>
+                            <constructor-arg index="1"
+                                             value="${kylin.security.ldap.service-group-search-base}"/>
+                            <constructor-arg index="2" value="${kylin.security.acl.admin-role}"/>
+                            <property name="groupSearchFilter" value="${kylin.security.ldap.user-group-search-filter}"/>
+                        </bean>
+                    </constructor-arg>
+                </bean>
+            </constructor-arg>
+        </bean>
+
+    </beans>
+
+    <beans profile="ldap">
+        <scr:authentication-manager alias="ldapAuthenticationManager">
+            <!-- do user ldap auth -->
+            <scr:authentication-provider ref="kylinUserAuthProvider"></scr:authentication-provider>
+
+            <!-- do service account ldap auth -->
+            <scr:authentication-provider
+                    ref="kylinServiceAccountAuthProvider"></scr:authentication-provider>
+        </scr:authentication-manager>
+
+    </beans>
+
+
+    <beans profile="testing">
+        <util:list id="adminAuthorities"
+                   value-type="org.springframework.security.core.authority.SimpleGrantedAuthority">
+            <value>ROLE_ADMIN</value>
+            <value>ROLE_MODELER</value>
+            <value>ROLE_ANALYST</value>
+        </util:list>
+        <util:list id="modelerAuthorities"
+                   value-type="org.springframework.security.core.authority.SimpleGrantedAuthority">
+            <value>ROLE_MODELER</value>
+            <value>ROLE_ANALYST</value>
+        </util:list>
+        <util:list id="analystAuthorities"
+                   value-type="org.springframework.security.core.authority.SimpleGrantedAuthority">
+            <value>ROLE_ANALYST</value>
+        </util:list>
+
+        <bean class="org.springframework.security.core.userdetails.User" id="adminUser">
+            <constructor-arg value="ADMIN"/>
+            <constructor-arg
+                    value="$2a$10$o3ktIWsGYxXNuUWQiYlZXOW5hWcqyNAFQsSSCSEWoC/BRVMAUjL32"/>
+            <constructor-arg ref="adminAuthorities"/>
+        </bean>
+        <bean class="org.springframework.security.core.userdetails.User" id="modelerUser">
+            <constructor-arg value="MODELER"/>
+            <constructor-arg
+                    value="$2a$10$Le5ernTeGNIARwMJsY0WaOLioNQdb0QD11DwjeyNqqNRp5NaDo2FG"/>
+            <constructor-arg ref="modelerAuthorities"/>
+        </bean>
+        <bean class="org.springframework.security.core.userdetails.User" id="analystUser">
+            <constructor-arg value="ANALYST"/>
+            <constructor-arg
+                    value="$2a$10$s4INO3XHjPP5Vm2xH027Ce9QeXWdrfq5pvzuGr9z/lQmHqi0rsbNi"/>
+            <constructor-arg ref="analystAuthorities"/>
+        </bean>
+
+        <bean id="kylinUserAuthProvider"
+              class="org.apache.kylin.rest.security.KylinAuthenticationProvider">
+            <constructor-arg>
+                <bean class="org.springframework.security.authentication.dao.DaoAuthenticationProvider">
+                    <property name="userDetailsService">
+                        <bean class="org.apache.kylin.rest.service.KylinUserService">
+                            <constructor-arg>
+                                <util:list
+                                        value-type="org.springframework.security.core.userdetails.User">
+                                    <ref bean="adminUser"></ref>
+                                    <ref bean="modelerUser"></ref>
+                                    <ref bean="analystUser"></ref>
+                                </util:list>
+                            </constructor-arg>
+                        </bean>
+                    </property>
+
+                    <property name="passwordEncoder" ref="passwordEncoder"></property>
+                </bean>
+            </constructor-arg>
+        </bean>
+
+        <!-- user auth -->
+        <bean id="passwordEncoder"
+              class="org.springframework.security.crypto.bcrypt.BCryptPasswordEncoder"/>
+
+        <scr:authentication-manager alias="testingAuthenticationManager">
+            <!-- do user ldap auth -->
+            <scr:authentication-provider ref="kylinUserAuthProvider"></scr:authentication-provider>
+        </scr:authentication-manager>
+    </beans>
+
+
+    <beans profile="testing,ldap">
+        <scr:http auto-config="true" use-expressions="true">
+            <scr:csrf disabled="true"/>
+            <scr:http-basic entry-point-ref="unauthorisedEntryPoint"/>
+
+            <scr:intercept-url pattern="/api/user/authentication*/**" access="permitAll"/>
+            <scr:intercept-url pattern="/api/query/runningQueries" access="hasRole('ROLE_ADMIN')"/>
+            <scr:intercept-url pattern="/api/query/*/stop" access="hasRole('ROLE_ADMIN')"/>
+            <scr:intercept-url pattern="/api/query*/**" access="isAuthenticated()"/>
+            <scr:intercept-url pattern="/api/metadata*/**" access="isAuthenticated()"/>
+            <scr:intercept-url pattern="/api/**/metrics" access="permitAll"/>
+            <scr:intercept-url pattern="/api/cache*/**" access="permitAll"/>
+            <scr:intercept-url pattern="/api/streaming_coordinator/**" access="permitAll"/>
+            <scr:intercept-url pattern="/api/service_discovery/state/is_active_job_node" access="permitAll"/>
+            <scr:intercept-url pattern="/api/cubes/src/tables" access="hasAnyRole('ROLE_ANALYST')"/>
+            <scr:intercept-url pattern="/api/cubes*/**" access="isAuthenticated()"/>
+            <scr:intercept-url pattern="/api/models*/**" access="isAuthenticated()"/>
+            <scr:intercept-url pattern="/api/streaming*/**" access="isAuthenticated()"/>
+            <scr:intercept-url pattern="/api/job*/**" access="isAuthenticated()"/>
+            <scr:intercept-url pattern="/api/admin/public_config" access="permitAll"/>
+            <scr:intercept-url pattern="/api/admin/version" access="permitAll"/>
+            <scr:intercept-url pattern="/api/projects" access="permitAll"/>
+            <scr:intercept-url pattern="/api/admin*/**" access="hasRole('ROLE_ADMIN')"/>
+            <scr:intercept-url pattern="/api/tables/**/snapshotLocalCache/**" access="permitAll"/>
+            <scr:intercept-url pattern="/api/**" access="isAuthenticated()"/>
+
+            <scr:form-login login-page="/login"/>
+            <scr:logout invalidate-session="true" delete-cookies="JSESSIONID" logout-url="/j_spring_security_logout"
+                        logout-success-url="/."/>
+            <scr:session-management session-fixation-protection="newSession"/>
+        </scr:http>
+    </beans>
+
+    <beans profile="saml">
+        <!-- Enable auto-wiring -->
+        <context:annotation-config/>
+
+        <!-- Scan for auto-wiring classes in spring saml packages -->
+        <context:component-scan base-package="org.springframework.security.saml"/>
+
+        <!-- Unsecured pages -->
+        <scr:http security="none" pattern="/image/**"/>
+        <scr:http security="none" pattern="/css/**"/>
+        <scr:http security="none" pattern="/less/**"/>
+        <scr:http security="none" pattern="/fonts/**"/>
+        <scr:http security="none" pattern="/js/**"/>
+        <scr:http security="none" pattern="/login/**"/>
+        <scr:http security="none" pattern="/routes.json"/>
+
+        <!-- Secured Rest API urls with LDAP basic authentication -->
+        <scr:http pattern="/api/**" use-expressions="true"
+                  authentication-manager-ref="apiAccessAuthenticationManager">
+            <scr:csrf disabled="true"/>
+            <scr:http-basic entry-point-ref="unauthorisedEntryPoint"/>
+
+            <scr:intercept-url pattern="/api/user/authentication*/**" access="permitAll"/>
+            <scr:intercept-url pattern="/api/query/runningQueries" access="hasRole('ROLE_ADMIN')"/>
+            <scr:intercept-url pattern="/api/query/*/stop" access="hasRole('ROLE_ADMIN')"/>
+            <scr:intercept-url pattern="/api/query*/**" access="isAuthenticated()"/>
+            <scr:intercept-url pattern="/api/metadata*/**" access="isAuthenticated()"/>
+            <scr:intercept-url pattern="/api/**/metrics" access="permitAll"/>
+            <scr:intercept-url pattern="/api/cache*/**" access="permitAll"/>
+            <scr:intercept-url pattern="/api/streaming_coordinator/**" access="permitAll"/>
+            <scr:intercept-url pattern="/api/cubes/src/tables" access="hasAnyRole('ROLE_ANALYST')"/>
+            <scr:intercept-url pattern="/api/cubes*/**" access="isAuthenticated()"/>
+            <scr:intercept-url pattern="/api/models*/**" access="isAuthenticated()"/>
+            <scr:intercept-url pattern="/api/streaming*/**" access="isAuthenticated()"/>
+            <scr:intercept-url pattern="/api/job*/**" access="isAuthenticated()"/>
+            <scr:intercept-url pattern="/api/admin/config" access="permitAll"/>
+            <scr:intercept-url pattern="/api/projects*/*" access="isAuthenticated()"/>
+            <scr:intercept-url pattern="/api/admin*/**" access="hasRole('ROLE_ADMIN')"/>
+            <scr:intercept-url pattern="/api/tables/**/snapshotLocalCache/**" access="permitAll"/>
+            <scr:intercept-url pattern="/api/**" access="isAuthenticated()"/>
+
+            <scr:form-login login-page="/login"/>
+            <scr:logout invalidate-session="true" delete-cookies="JSESSIONID" logout-url="/j_spring_security_logout"
+                        logout-success-url="/."/>
+            <scr:session-management session-fixation-protection="newSession"/>
+        </scr:http>
+
+        <!-- Secured non-api urls with SAML SSO -->
+        <scr:http auto-config="true" entry-point-ref="samlEntryPoint" use-expressions="false"
+                  authentication-manager-ref="webAccessAuthenticationManager">
+            <scr:csrf disabled="true"/>
+            <scr:intercept-url pattern="/**" access="IS_AUTHENTICATED_FULLY"/>
+            <scr:custom-filter before="FIRST" ref="metadataGeneratorFilter"/>
+            <scr:custom-filter after="BASIC_AUTH_FILTER" ref="samlFilter"/>
+        </scr:http>
+
+
+        <!-- API authentication manager -->
+        <scr:authentication-manager id="apiAccessAuthenticationManager">
+            <scr:authentication-provider ref="kylinServiceAccountAuthProvider"/>
+            <scr:authentication-provider ref="kylinUserAuthProvider"/>
+        </scr:authentication-manager>
+
+
+        <!-- Web authentication manager -->
+        <scr:authentication-manager id="webAccessAuthenticationManager">
+            <scr:authentication-provider ref="kylinSAMLAuthenticationProvider"/>
+        </scr:authentication-manager>
+
+        <!-- Central storage of cryptographic keys -->
+        <bean id="keyManager" class="org.springframework.security.saml.key.JKSKeyManager">
+            <constructor-arg value="${kylin.security.saml.keystore-file}"/>
+            <constructor-arg type="java.lang.String" value="changeit"/>
+            <constructor-arg>
+                <map>
+                    <entry key="kylin" value="changeit"/>
+                </map>
+            </constructor-arg>
+            <constructor-arg type="java.lang.String" value="kylin"/>
+        </bean>
+
+        <!-- Filters for processing of SAML messages -->
+        <bean id="samlFilter" class="org.springframework.security.web.FilterChainProxy">
+            <scr:filter-chain-map request-matcher="ant">
+                <scr:filter-chain pattern="/saml/login/**" filters="samlEntryPoint"/>
+                <scr:filter-chain pattern="/saml/logout/**" filters="samlLogoutFilter"/>
+                <scr:filter-chain pattern="/saml/metadata/**" filters="metadataDisplayFilter"/>
+                <scr:filter-chain pattern="/saml/SSO/**" filters="samlWebSSOProcessingFilter"/>
+                <scr:filter-chain pattern="/saml/SSOHoK/**"
+                                  filters="samlWebSSOHoKProcessingFilter"/>
+                <scr:filter-chain pattern="/saml/SingleLogout/**"
+                                  filters="samlLogoutProcessingFilter"/>
+            </scr:filter-chain-map>
+        </bean>
+
+        <!-- Handler deciding where to redirect user after successful login -->
+        <bean id="successRedirectHandler"
+              class="org.springframework.security.web.authentication.SavedRequestAwareAuthenticationSuccessHandler">
+            <property name="defaultTargetUrl" value="/models"/>
+        </bean>
+
+        <!-- Handler deciding where to redirect user after failed login -->
+        <bean id="failureRedirectHandler"
+              class="org.springframework.security.web.authentication.SimpleUrlAuthenticationFailureHandler">
+            <property name="useForward" value="true"/>
+            <property name="defaultFailureUrl" value="/login"/>
+        </bean>
+
+        <!-- Handler for successful logout -->
+        <bean id="successLogoutHandler"
+              class="org.springframework.security.web.authentication.logout.SimpleUrlLogoutSuccessHandler">
+        </bean>
+
+        <!-- Logger for SAML messages and events -->
+        <bean id="samlLogger" class="org.springframework.security.saml.log.SAMLDefaultLogger"/>
+
+        <!-- Filter automatically generates default SP metadata -->
+        <bean id="metadataGeneratorFilter"
+              class="org.springframework.security.saml.metadata.MetadataGeneratorFilter">
+            <constructor-arg>
+                <bean class="org.springframework.security.saml.metadata.MetadataGenerator">
+                    <property name="extendedMetadata">
+                        <bean class="org.springframework.security.saml.metadata.ExtendedMetadata">
+                            <property name="idpDiscoveryEnabled" value="false"/>
+                        </bean>
+                    </property>
+                    <property name="entityBaseURL"
+                              value="${kylin.security.saml.metadata-entity-base-url}"/>
+                </bean>
+            </constructor-arg>
+        </bean>
+
+        <!-- Entry point to initialize authentication, default values taken from properties file -->
+        <bean id="samlEntryPoint" class="org.springframework.security.saml.SAMLEntryPoint">
+            <property name="defaultProfileOptions">
+                <bean class="org.springframework.security.saml.websso.WebSSOProfileOptions">
+                    <property name="includeScoping" value="false"/>
+                </bean>
+            </property>
+        </bean>
+
+        <!-- The filter is waiting for connections on URL suffixed with filterSuffix and presents SP metadata there -->
+        <bean id="metadataDisplayFilter"
+              class="org.springframework.security.saml.metadata.MetadataDisplayFilter"/>
+
+        <!-- IDP Metadata configuration - paths to metadata of IDPs in circle of trust is here -->
+        <bean id="metadata"
+              class="org.springframework.security.saml.metadata.CachingMetadataManager">
+            <constructor-arg>
+                <list>
+                    <!-- Example of classpath metadata with Extended Metadata -->
+                    <bean class="org.springframework.security.saml.metadata.ExtendedMetadataDelegate">
+                        <constructor-arg>
+                            <bean class="org.opensaml.saml2.metadata.provider.FilesystemMetadataProvider">
+                                <constructor-arg>
+                                    <value type="java.io.File">${kylin.security.saml.metadata-file}</value>
+                                </constructor-arg>
+                                <property name="parserPool" ref="parserPool"/>
+                            </bean>
+                        </constructor-arg>
+                        <constructor-arg>
+                            <bean class="org.springframework.security.saml.metadata.ExtendedMetadata">
+                            </bean>
+                        </constructor-arg>
+                        <property name="metadataTrustCheck" value="false"/>
+                    </bean>
+                </list>
+            </constructor-arg>
+        </bean>
+
+        <bean id="ldapUserAuthoritiesPopulator"
+              class="org.apache.kylin.rest.security.LDAPAuthoritiesPopulator">
+            <constructor-arg index="0" ref="ldapSource"/>
+            <constructor-arg index="1" value="${kylin.security.ldap.user-group-search-base}"/>
+            <constructor-arg index="2" value="${kylin.security.acl.admin-role}"/>
+            <property name="groupSearchFilter" value="${kylin.security.ldap.user-group-search-filter}"/>
+        </bean>
+
+        <bean id="userSearch"
+              class="org.springframework.security.ldap.search.FilterBasedLdapUserSearch">
+            <constructor-arg index="0" value="${kylin.security.ldap.user-search-base}"/>
+            <constructor-arg index="1" value="${kylin.security.ldap.user-search-pattern}"/>
+            <constructor-arg index="2" ref="ldapSource"/>
+        </bean>
+
+
+        <bean id="samlUserDetailsService"
+              class="org.apache.kylin.rest.security.SAMLUserDetailsService">
+            <constructor-arg>
+                <bean id="ldapUserDetailsService"
+                      class="org.springframework.security.ldap.userdetails.LdapUserDetailsService">
+                    <constructor-arg ref="userSearch"/>
+                    <constructor-arg ref="ldapUserAuthoritiesPopulator"/>
+                </bean>
+            </constructor-arg>
+        </bean>
+
+        <bean id="kylinSAMLAuthenticationProvider"
+              class="org.apache.kylin.rest.security.KylinAuthenticationProvider">
+            <constructor-arg>
+                <!-- SAML Authentication Provider responsible for validating of received SAML messages -->
+                <bean id="samlAuthenticationProvider"
+                      class="org.springframework.security.saml.SAMLAuthenticationProvider">
+                    <!-- OPTIONAL property: can be used to store/load user data after login -->
+                    <property name="userDetails" ref="samlUserDetailsService"/>
+                </bean>
+            </constructor-arg>
+        </bean>
+
+
+        <!-- Provider of default SAML Context -->
+        <!-- 
+        <bean id="contextProvider" class="org.springframework.security.saml.context.SAMLContextProviderImpl"/>
+        -->
+
+        <!-- Provider of a SAML Context behind a load balancer or reverse proxy -->
+        <bean id="contextProvider"
+              class="org.springframework.security.saml.context.SAMLContextProviderLB">
+            <property name="scheme" value="${kylin.security.saml.context-scheme}"/>
+            <property name="serverName" value="${kylin.security.saml.context-server-name}"/>
+            <property name="serverPort" value="${kylin.security.saml.context-server-port}"/>
+            <property name="includeServerPortInRequestURL" value="false"/>
+            <property name="contextPath" value="${kylin.security.saml.context-path}"/>
+        </bean>
+
+
+        <!-- Processing filter for WebSSO profile messages -->
+        <bean id="samlWebSSOProcessingFilter"
+              class="org.springframework.security.saml.SAMLProcessingFilter">
+            <property name="authenticationManager" ref="webAccessAuthenticationManager"/>
+            <property name="authenticationSuccessHandler" ref="successRedirectHandler"/>
+            <property name="authenticationFailureHandler" ref="failureRedirectHandler"/>
+        </bean>
+
+        <!-- Processing filter for WebSSO Holder-of-Key profile -->
+        <bean id="samlWebSSOHoKProcessingFilter"
+              class="org.springframework.security.saml.SAMLWebSSOHoKProcessingFilter">
+            <property name="authenticationManager" ref="webAccessAuthenticationManager"/>
+            <property name="authenticationSuccessHandler" ref="successRedirectHandler"/>
+            <property name="authenticationFailureHandler" ref="failureRedirectHandler"/>
+        </bean>
+
+        <!-- Logout handler terminating local session -->
+        <bean id="logoutHandler"
+              class="org.springframework.security.web.authentication.logout.SecurityContextLogoutHandler">
+            <property name="invalidateHttpSession" value="false"/>
+        </bean>
+
+        <!-- Override default logout processing filter with the one processing SAML messages -->
+        <bean id="samlLogoutFilter" class="org.springframework.security.saml.SAMLLogoutFilter">
+            <constructor-arg index="0" ref="successLogoutHandler"/>
+            <constructor-arg index="1" ref="logoutHandler"/>
+            <constructor-arg index="2" ref="logoutHandler"/>
+        </bean>
+
+        <!-- Filter processing incoming logout messages -->
+        <!-- First argument determines URL user will be redirected to after successful global logout -->
+        <bean id="samlLogoutProcessingFilter"
+              class="org.springframework.security.saml.SAMLLogoutProcessingFilter">
+            <constructor-arg index="0" ref="successLogoutHandler"/>
+            <constructor-arg index="1" ref="logoutHandler"/>
+        </bean>
+
+        <!-- Class loading incoming SAML messages from httpRequest stream -->
+        <bean id="processor" class="org.springframework.security.saml.processor.SAMLProcessorImpl">
+            <constructor-arg>
+                <list>
+                    <ref bean="redirectBinding"/>
+                    <ref bean="postBinding"/>
+                    <ref bean="artifactBinding"/>
+                    <ref bean="soapBinding"/>
+                    <ref bean="paosBinding"/>
+                </list>
+            </constructor-arg>
+        </bean>
+
+        <!-- SAML 2.0 WebSSO Assertion Consumer -->
+        <bean id="webSSOprofileConsumer"
+              class="org.springframework.security.saml.websso.WebSSOProfileConsumerImpl">
+            <property name="responseSkew" value="600"/> <!-- 10 minutes -->
+        </bean>
+
+        <!-- SAML 2.0 Holder-of-Key WebSSO Assertion Consumer -->
+        <bean id="hokWebSSOprofileConsumer"
+              class="org.springframework.security.saml.websso.WebSSOProfileConsumerHoKImpl"/>
+
+        <!-- SAML 2.0 Web SSO profile -->
+        <bean id="webSSOprofile"
+              class="org.springframework.security.saml.websso.WebSSOProfileImpl"/>
+
+        <!-- SAML 2.0 Holder-of-Key Web SSO profile -->
+        <bean id="hokWebSSOProfile"
+              class="org.springframework.security.saml.websso.WebSSOProfileConsumerHoKImpl"/>
+
+        <!-- SAML 2.0 ECP profile -->
+        <bean id="ecpprofile"
+              class="org.springframework.security.saml.websso.WebSSOProfileECPImpl"/>
+
+        <!-- SAML 2.0 Logout Profile -->
+        <bean id="logoutprofile"
+              class="org.springframework.security.saml.websso.SingleLogoutProfileImpl">
+            <property name="responseSkew" value="600"/> <!-- 10 minutes -->
+        </bean>
+
+        <!-- Bindings, encoders and decoders used for creating and parsing messages -->
+        <bean id="postBinding" class="org.springframework.security.saml.processor.HTTPPostBinding">
+            <constructor-arg ref="parserPool"/>
+            <constructor-arg ref="velocityEngine"/>
+        </bean>
+
+        <bean id="redirectBinding"
+              class="org.springframework.security.saml.processor.HTTPRedirectDeflateBinding">
+            <constructor-arg ref="parserPool"/>
+        </bean>
+
+        <bean id="artifactBinding"
+              class="org.springframework.security.saml.processor.HTTPArtifactBinding">
+            <constructor-arg ref="parserPool"/>
+            <constructor-arg ref="velocityEngine"/>
+            <constructor-arg>
+                <bean class="org.springframework.security.saml.websso.ArtifactResolutionProfileImpl">
+                    <constructor-arg>
+                        <bean class="org.apache.commons.httpclient.HttpClient">
+                            <constructor-arg>
+                                <bean class="org.apache.commons.httpclient.MultiThreadedHttpConnectionManager"/>
+                            </constructor-arg>
+                        </bean>
+                    </constructor-arg>
+                    <property name="processor">
+                        <bean class="org.springframework.security.saml.processor.SAMLProcessorImpl">
+                            <constructor-arg ref="soapBinding"/>
+                        </bean>
+                    </property>
+                </bean>
+            </constructor-arg>
+        </bean>
+
+        <bean id="soapBinding"
+              class="org.springframework.security.saml.processor.HTTPSOAP11Binding">
+            <constructor-arg ref="parserPool"/>
+        </bean>
+
+        <bean id="paosBinding"
+              class="org.springframework.security.saml.processor.HTTPPAOS11Binding">
+            <constructor-arg ref="parserPool"/>
+        </bean>
+
+        <!-- Initialization of OpenSAML library-->
+        <bean class="org.springframework.security.saml.SAMLBootstrap"/>
+
+        <!-- Initialization of the velocity engine -->
+        <bean id="velocityEngine" class="org.springframework.security.saml.util.VelocityFactory"
+              factory-method="getEngine"/>
+
+        <!-- XML parser pool needed for OpenSAML parsing -->
+        <bean id="parserPool" class="org.opensaml.xml.parse.StaticBasicParserPool"
+              init-method="initialize">
+            <property name="builderFeatures">
+                <map>
+                    <entry key="http://apache.org/xml/features/dom/defer-node-expansion"
+                           value="false"/>
+                </map>
+            </property>
+        </bean>
+
+        <bean id="parserPoolHolder"
+              class="org.springframework.security.saml.parser.ParserPoolHolder"/>
+    </beans>
+</beans>
diff --git a/kubernetes/config/production/kylin-query/kylin-kafka-consumer.xml b/kubernetes/config/production/kylin-query/kylin-kafka-consumer.xml
new file mode 100644
index 0000000..8529a41
--- /dev/null
+++ b/kubernetes/config/production/kylin-query/kylin-kafka-consumer.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<!--
+ for more kafka consumer configs, please refer to http://kafka.apache.org/documentation#consumerconfigs
+-->
+<configuration>
+    <property>
+        <name>session.timeout.ms</name>
+        <value>10000</value>
+    </property>
+    <property>
+        <name>request.timeout.ms</name>
+        <value>20000</value>
+    </property>
+</configuration>
\ No newline at end of file
diff --git a/k8s/developments/config/kylin/kylin-server-log4j.properties b/kubernetes/config/production/kylin-query/kylin-server-log4j.properties
similarity index 100%
copy from k8s/developments/config/kylin/kylin-server-log4j.properties
copy to kubernetes/config/production/kylin-query/kylin-server-log4j.properties
diff --git a/k8s/developments/config/kylin/kylin-tools-log4j.properties b/kubernetes/config/production/kylin-query/kylin-spark-log4j.properties
similarity index 51%
copy from k8s/developments/config/kylin/kylin-tools-log4j.properties
copy to kubernetes/config/production/kylin-query/kylin-spark-log4j.properties
index 54d18c2..948fb32 100644
--- a/k8s/developments/config/kylin/kylin-tools-log4j.properties
+++ b/kubernetes/config/production/kylin-query/kylin-spark-log4j.properties
@@ -15,24 +15,29 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-
-
-# the kylin-tools-log4j.properties is mainly for configuring log properties on kylin tools, including:
-#   1. tools launched by kylin.sh script, e.g. DeployCoprocessorCLI
-#   2. DebugTomcat
-#   3. others
-#
-# It's called kylin-tools-log4j.properties so that it won't distract users from the other more important log4j config file: kylin-server-log4j.properties
-# enable this by -Dlog4j.configuration=kylin-tools-log4j.properties
-
-log4j.rootLogger=INFO,stderr
+log4j.rootCategory=WARN,stderr,stdout
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.target=System.out
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}:%L : %m%n
 
 log4j.appender.stderr=org.apache.log4j.ConsoleAppender
 log4j.appender.stderr.Target=System.err
 log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
 log4j.appender.stderr.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}:%L : %m%n
 
-#log4j.logger.org.apache.hadoop=ERROR
-log4j.logger.org.apache.kylin=DEBUG
-log4j.logger.org.springframework=WARN
-log4j.logger.org.apache.kylin.tool.shaded=INFO
\ No newline at end of file
+
+# Settings to quiet third party logs that are too verbose
+log4j.logger.org.spark-project.jetty=WARN
+log4j.logger.org.spark-project.jetty.util.component.AbstractLifeCycle=ERROR
+log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO
+log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO
+log4j.logger.org.apache.parquet=ERROR
+log4j.logger.parquet=ERROR
+
+# SPARK-9183: Settings to avoid annoying messages when looking up nonexistent UDFs in SparkSQL with Hive support
+log4j.logger.org.apache.hadoop.hive.metastore.RetryingHMSHandler=FATAL
+log4j.logger.org.apache.hadoop.hive.ql.exec.FunctionRegistry=ERROR
+log4j.logger.org.apache.spark.sql=WARN
+
+log4j.logger.org.apache.kylin=DEBUG
\ No newline at end of file
diff --git a/k8s/developments/config/kylin/kylin-tools-log4j.properties b/kubernetes/config/production/kylin-query/kylin-tools-log4j.properties
similarity index 100%
copy from k8s/developments/config/kylin/kylin-tools-log4j.properties
copy to kubernetes/config/production/kylin-query/kylin-tools-log4j.properties
diff --git a/kubernetes/config/production/kylin-query/kylin.properties b/kubernetes/config/production/kylin-query/kylin.properties
new file mode 100644
index 0000000..1c5646f
--- /dev/null
+++ b/kubernetes/config/production/kylin-query/kylin.properties
@@ -0,0 +1,419 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+kylin.cache.memcached.hosts=10.1.2.42:11211
+kylin.query.cache-signature-enabled=true
+kylin.query.lazy-query-enabled=true
+kylin.metrics.memcached.enabled=true
+kylin.query.segment-cache-enabled=true
+
+kylin.metrics.monitor-enabled=true
+kylin.metrics.reporter-query-enabled=true
+kylin.metrics.reporter-job-enabled=true
+# The commented values below will take effect as the default settings
+# Uncomment and override them if necessary
+
+
+
+#
+#### METADATA | ENV ###
+#
+## The metadata store in hbase
+kylin.metadata.url=kylin_metadata_k8s_prod@hbase
+#
+## metadata cache sync retry times
+#kylin.metadata.sync-retries=3
+#
+## Working folder in HDFS, better be qualified absolute path, make sure user has the right permission to this directory
+kylin.env.hdfs-working-dir=/kylin/apache_kylin
+#
+## DEV|QA|PROD. DEV will turn on some dev features, QA and PROD has no difference in terms of functions.
+#kylin.env=QA
+#
+## kylin zk base path
+#kylin.env.zookeeper-base-path=/kylin
+#
+#### SERVER | WEB | RESTCLIENT ###
+#
+## Kylin server mode, valid value [all, query, job]
+kylin.server.mode=query
+#
+## List of web servers in use, this enables one web server instance to sync up with other servers.
+#kylin.server.cluster-servers=localhost:7070
+#
+## Display timezone on UI,format like[GMT+N or GMT-N]
+#kylin.web.timezone=
+#
+## Timeout value for the queries submitted through the Web UI, in milliseconds
+#kylin.web.query-timeout=300000
+#
+#kylin.web.cross-domain-enabled=true
+#
+##allow user to export query result
+#kylin.web.export-allow-admin=true
+#kylin.web.export-allow-other=true
+#
+## Hide measures in measure list of cube designer, separate by comma
+#kylin.web.hide-measures=RAW
+#
+##max connections of one route
+#kylin.restclient.connection.default-max-per-route=20
+#
+##max connections of one rest-client
+#kylin.restclient.connection.max-total=200
+#
+#### PUBLIC CONFIG ###
+#kylin.engine.default=2
+#kylin.storage.default=2
+#kylin.web.hive-limit=20
+#kylin.web.help.length=4
+#kylin.web.help.0=start|Getting Started|http://kylin.apache.org/docs/tutorial/kylin_sample.html
+#kylin.web.help.1=odbc|ODBC Driver|http://kylin.apache.org/docs/tutorial/odbc.html
+#kylin.web.help.2=tableau|Tableau Guide|http://kylin.apache.org/docs/tutorial/tableau_91.html
+#kylin.web.help.3=onboard|Cube Design Tutorial|http://kylin.apache.org/docs/howto/howto_optimize_cubes.html
+#kylin.web.link-streaming-guide=http://kylin.apache.org/
+#kylin.htrace.show-gui-trace-toggle=false
+#kylin.web.link-hadoop=
+#kylin.web.link-diagnostic=
+#kylin.web.contact-mail=
+#kylin.server.external-acl-provider=
+#
+## Default time filter for job list, 0->current day, 1->last one day, 2->last one week, 3->last one year, 4->all
+#kylin.web.default-time-filter=1
+#
+#### SOURCE ###
+#
+## Hive client, valid value [cli, beeline]
+#kylin.source.hive.client=cli
+#
+## Absolute path to beeline shell, can be set to spark beeline instead of the default hive beeline on PATH
+#kylin.source.hive.beeline-shell=beeline
+#
+## Parameters for beeline client, only necessary if hive client is beeline
+##kylin.source.hive.beeline-params=-n root --hiveconf hive.security.authorization.sqlstd.confwhitelist.append='mapreduce.job.*|dfs.*' -u jdbc:hive2://localhost:10000
+#
+## While hive client uses above settings to read hive table metadata,
+## table operations can go through a separate SparkSQL command line, given SparkSQL connects to the same Hive metastore.
+#kylin.source.hive.enable-sparksql-for-table-ops=false
+##kylin.source.hive.sparksql-beeline-shell=/path/to/spark-client/bin/beeline
+##kylin.source.hive.sparksql-beeline-params=-n root --hiveconf hive.security.authorization.sqlstd.confwhitelist.append='mapreduce.job.*|dfs.*' -u jdbc:hive2://localhost:10000
+#
+#kylin.source.hive.keep-flat-table=false
+#
+## Hive database name for putting the intermediate flat tables
+#kylin.source.hive.database-for-flat-table=default
+#
+## Whether redistribute the intermediate flat table before building
+#kylin.source.hive.redistribute-flat-table=true
+#
+#
+#### STORAGE ###
+#
+## The storage for final cube file in hbase
+#kylin.storage.url=hbase
+#
+## The prefix of hbase table
+kylin.storage.hbase.table-name-prefix=K8S_
+#
+## The namespace for hbase storage
+kylin.storage.hbase.namespace=lacus
+#
+## Compression codec for htable, valid value [none, snappy, lzo, gzip, lz4]
+#kylin.storage.hbase.compression-codec=none
+#
+## HBase Cluster FileSystem, which serving hbase, format as hdfs://hbase-cluster:8020
+## Leave empty if hbase running on same cluster with hive and mapreduce
+##kylin.storage.hbase.cluster-fs=
+#
+## The cut size for hbase region, in GB.
+#kylin.storage.hbase.region-cut-gb=5
+#
+## The hfile size in GB; a smaller hfile size gives the hfile-conversion MR job more reducers, making it faster.
+## Set 0 to disable this optimization.
+#kylin.storage.hbase.hfile-size-gb=2
+#
+#kylin.storage.hbase.min-region-count=1
+#kylin.storage.hbase.max-region-count=500
+#
+## Optional information for the owner of kylin platform, it can be your team's email
+## Currently it will be attached to each kylin's htable attribute
+#kylin.storage.hbase.owner-tag=whoami@kylin.apache.org
+#
+#kylin.storage.hbase.coprocessor-mem-gb=3
+#
+## By default kylin can spill query's intermediate results to disks when it's consuming too much memory.
+## Set it to false if you want query to abort immediately in such condition.
+#kylin.storage.partition.aggr-spill-enabled=true
+#
+## The maximum number of bytes each coprocessor is allowed to scan.
+## To allow arbitrary large scan, you can set it to 0.
+#kylin.storage.partition.max-scan-bytes=3221225472
+#
+## The default coprocessor timeout is (hbase.rpc.timeout * 0.9) / 1000 seconds,
+## You can set it to a smaller value. 0 means use default.
+## kylin.storage.hbase.coprocessor-timeout-seconds=0
+#
+## clean real storage after delete operation
+## if you want to delete the real storage like htable of deleting segment, you can set it to true
+#kylin.storage.clean-after-delete-operation=false
+#
+#### JOB ###
+#
+## Max job retry on error, default 0: no retry
+#kylin.job.retry=0
+#
+## Max count of concurrent jobs running
+#kylin.job.max-concurrent-jobs=10
+#
+## The percentage of the sampling, default 100%
+#kylin.job.sampling-percentage=100
+#
+## If true, will send email notification on job complete
+##kylin.job.notification-enabled=true
+##kylin.job.notification-mail-enable-starttls=true
+##kylin.job.notification-mail-host=smtp.office365.com
+##kylin.job.notification-mail-port=587
+##kylin.job.notification-mail-username=kylin@example.com
+##kylin.job.notification-mail-password=mypassword
+##kylin.job.notification-mail-sender=kylin@example.com
+kylin.job.scheduler.provider.100=org.apache.kylin.job.impl.curator.CuratorScheduler
+kylin.job.scheduler.default=100
+#
+#### ENGINE ###
+#
+## Time interval to check hadoop job status
+#kylin.engine.mr.yarn-check-interval-seconds=10
+#
+#kylin.engine.mr.reduce-input-mb=500
+#
+#kylin.engine.mr.max-reducer-number=500
+#
+#kylin.engine.mr.mapper-input-rows=1000000
+#
+## Enable dictionary building in MR reducer
+#kylin.engine.mr.build-dict-in-reducer=true
+#
+## Number of reducers for fetching UHC column distinct values
+#kylin.engine.mr.uhc-reducer-count=3
+#
+## Whether using an additional step to build UHC dictionary
+#kylin.engine.mr.build-uhc-dict-in-additional-step=false
+#
+#
+#### CUBE | DICTIONARY ###
+#
+#kylin.cube.cuboid-scheduler=org.apache.kylin.cube.cuboid.DefaultCuboidScheduler
+#kylin.cube.segment-advisor=org.apache.kylin.cube.CubeSegmentAdvisor
+#
+## 'auto', 'inmem', 'layer' or 'random' for testing 
+#kylin.cube.algorithm=layer
+#
+## A smaller threshold prefers layer, a larger threshold prefers in-mem
+#kylin.cube.algorithm.layer-or-inmem-threshold=7
+#
+## auto use inmem algorithm:
+## 1, cube planner optimize job
+## 2, no source record
+#kylin.cube.algorithm.inmem-auto-optimize=true
+#
+#kylin.cube.aggrgroup.max-combination=32768
+#
+#kylin.snapshot.max-mb=300
+#
+#kylin.cube.cubeplanner.enabled=true
+#kylin.cube.cubeplanner.enabled-for-existing-cube=true
+#kylin.cube.cubeplanner.expansion-threshold=15.0
+#kylin.cube.cubeplanner.recommend-cache-max-size=200
+#kylin.cube.cubeplanner.mandatory-rollup-threshold=1000
+#kylin.cube.cubeplanner.algorithm-threshold-greedy=8
+#kylin.cube.cubeplanner.algorithm-threshold-genetic=23
+#
+#
+#### QUERY ###
+#
+## Controls the maximum number of bytes a query is allowed to scan storage.
+## The default value 0 means no limit.
+## The counterpart kylin.storage.partition.max-scan-bytes sets the maximum per coprocessor.
+#kylin.query.max-scan-bytes=0
+#
+kylin.query.cache-enabled=true
+#
+## Controls extras properties for Calcite jdbc driver
+## all extras properties should be under the prefix "kylin.query.calcite.extras-props."
+## case sensitive, default: true, to enable case insensitive set it to false
+## @see org.apache.calcite.config.CalciteConnectionProperty.CASE_SENSITIVE
+#kylin.query.calcite.extras-props.caseSensitive=true
+## how to handle unquoted identifiers, default: TO_UPPER, available options: UNCHANGED, TO_UPPER, TO_LOWER
+## @see org.apache.calcite.config.CalciteConnectionProperty.UNQUOTED_CASING
+#kylin.query.calcite.extras-props.unquotedCasing=TO_UPPER
+## quoting method, default: DOUBLE_QUOTE, available options: DOUBLE_QUOTE, BACK_TICK, BRACKET
+## @see org.apache.calcite.config.CalciteConnectionProperty.QUOTING
+#kylin.query.calcite.extras-props.quoting=DOUBLE_QUOTE
+## change SqlConformance from DEFAULT to LENIENT to enable group by ordinal
+## @see org.apache.calcite.sql.validate.SqlConformance.SqlConformanceEnum
+#kylin.query.calcite.extras-props.conformance=LENIENT
+#
+## TABLE ACL
+#kylin.query.security.table-acl-enabled=true
+#
+## Usually should not modify this
+#kylin.query.interceptors=org.apache.kylin.rest.security.TableInterceptor
+#
+#kylin.query.escape-default-keyword=false
+#
+## Usually should not modify this
+#kylin.query.transformers=org.apache.kylin.query.util.DefaultQueryTransformer,org.apache.kylin.query.util.KeywordDefaultDirtyHack
+#
+#### SECURITY ###
+#
+## Spring security profile, options: testing, ldap, saml
+## with "testing" profile, user can use pre-defined name/pwd like KYLIN/ADMIN to login
+#kylin.security.profile=testing
+#
+## Admin roles in LDAP, for ldap and saml
+#kylin.security.acl.admin-role=admin
+#
+## LDAP authentication configuration
+#kylin.security.ldap.connection-server=ldap://ldap_server:389
+#kylin.security.ldap.connection-username=
+#kylin.security.ldap.connection-password=
+#
+## LDAP user account directory;
+#kylin.security.ldap.user-search-base=
+#kylin.security.ldap.user-search-pattern=
+#kylin.security.ldap.user-group-search-base=
+#kylin.security.ldap.user-group-search-filter=(|(member={0})(memberUid={1}))
+#
+## LDAP service account directory
+#kylin.security.ldap.service-search-base=
+#kylin.security.ldap.service-search-pattern=
+#kylin.security.ldap.service-group-search-base=
+#
+### SAML configurations for SSO
+## SAML IDP metadata file location
+#kylin.security.saml.metadata-file=classpath:sso_metadata.xml
+#kylin.security.saml.metadata-entity-base-url=https://hostname/kylin
+#kylin.security.saml.keystore-file=classpath:samlKeystore.jks
+#kylin.security.saml.context-scheme=https
+#kylin.security.saml.context-server-name=hostname
+#kylin.security.saml.context-server-port=443
+#kylin.security.saml.context-path=/kylin
+#
+#### SPARK ENGINE CONFIGS ###
+#
+## Hadoop conf folder, will export this as "HADOOP_CONF_DIR" to run spark-submit
+## This must contain site xmls of core, yarn, hive, and hbase in one folder
+##kylin.env.hadoop-conf-dir=/etc/hadoop/conf
+#
+## Estimate the RDD partition numbers
+#kylin.engine.spark.rdd-partition-cut-mb=10
+#
+## Minimal partition numbers of rdd
+#kylin.engine.spark.min-partition=1
+#
+## Max partition numbers of rdd
+#kylin.engine.spark.max-partition=5000
+#
+## Spark conf (default is in spark/conf/spark-defaults.conf)
+#kylin.engine.spark-conf.spark.master=yarn
+##kylin.engine.spark-conf.spark.submit.deployMode=cluster
+#kylin.engine.spark-conf.spark.yarn.queue=default
+#kylin.engine.spark-conf.spark.driver.memory=2G
+#kylin.engine.spark-conf.spark.executor.memory=4G
+#kylin.engine.spark-conf.spark.executor.instances=40
+#kylin.engine.spark-conf.spark.yarn.executor.memoryOverhead=1024
+#kylin.engine.spark-conf.spark.shuffle.service.enabled=true
+#kylin.engine.spark-conf.spark.eventLog.enabled=true
+#kylin.engine.spark-conf.spark.eventLog.dir=hdfs\:///kylin/spark-history
+#kylin.engine.spark-conf.spark.history.fs.logDirectory=hdfs\:///kylin/spark-history
+#kylin.engine.spark-conf.spark.hadoop.yarn.timeline-service.enabled=false
+#
+#### Spark conf for specific job
+#kylin.engine.spark-conf-mergedict.spark.executor.memory=6G
+#kylin.engine.spark-conf-mergedict.spark.memory.fraction=0.2
+#
+## manually upload spark-assembly jar to HDFS and then set this property will avoid repeatedly uploading jar at runtime
+##kylin.engine.spark-conf.spark.yarn.archive=hdfs://namenode:8020/kylin/spark/spark-libs.jar
+##kylin.engine.spark-conf.spark.io.compression.codec=org.apache.spark.io.SnappyCompressionCodec
+#
+## uncomment for HDP
+##kylin.engine.spark-conf.spark.driver.extraJavaOptions=-Dhdp.version=current
+##kylin.engine.spark-conf.spark.yarn.am.extraJavaOptions=-Dhdp.version=current
+##kylin.engine.spark-conf.spark.executor.extraJavaOptions=-Dhdp.version=current
+#
+#
+#### QUERY PUSH DOWN ###
+#
+##kylin.query.pushdown.runner-class-name=org.apache.kylin.query.adhoc.PushDownRunnerJdbcImpl
+#
+##kylin.query.pushdown.update-enabled=false
+##kylin.query.pushdown.jdbc.url=jdbc:hive2://sandbox:10000/default
+##kylin.query.pushdown.jdbc.driver=org.apache.hive.jdbc.HiveDriver
+##kylin.query.pushdown.jdbc.username=hive
+##kylin.query.pushdown.jdbc.password=
+#
+##kylin.query.pushdown.jdbc.pool-max-total=8
+##kylin.query.pushdown.jdbc.pool-max-idle=8
+##kylin.query.pushdown.jdbc.pool-min-idle=0
+#
+#### JDBC Data Source
+##kylin.source.jdbc.connection-url=
+##kylin.source.jdbc.driver=
+##kylin.source.jdbc.dialect=
+##kylin.source.jdbc.user=
+##kylin.source.jdbc.pass=
+##kylin.source.jdbc.sqoop-home=
+##kylin.source.jdbc.filed-delimiter=|
+#
+#### Livy with Kylin
+##kylin.engine.livy-conf.livy-enabled=false
+##kylin.engine.livy-conf.livy-url=http://LivyHost:8998
+##kylin.engine.livy-conf.livy-key.file=hdfs:///path-to-kylin-job-jar
+##kylin.engine.livy-conf.livy-arr.jars=hdfs:///path-to-hadoop-dependency-jar
+#
+#
+#### Realtime OLAP ###
+#
+## Where should local segment cache located, for absolute path, the real path will be ${KYLIN_HOME}/${kylin.stream.index.path}
+#kylin.stream.index.path=stream_index
+#
+## The timezone for Derived Time Column like hour_start, try set to GMT+N, please check detail at KYLIN-4010
+#kylin.stream.event.timezone=
+#
+## Debug switch for print realtime global dict encode information, please check detail at KYLIN-4141
+#kylin.stream.print-realtime-dict-enabled=false
+#
+## Should enable latest coordinator, please check detail at KYLIN-4167
+#kylin.stream.new.coordinator-enabled=true
+#
+## In which way should we collect receiver's metrics info
+##kylin.stream.metrics.option=console/csv/jmx
+#
+## When enabling a streaming cube, whether to consume from the earliest offset or the latest offset
+#kylin.stream.consume.offsets.latest=true
+#
+## The parallelism of scan in receiver side
+#kylin.stream.receiver.use-threads-per-query=8
+#
+## How coordinator/receiver register itself into StreamMetadata, there are three option:
+## 1. hostname:port, then kylin will set the config ip and port as the currentNode;
+## 2. port, then kylin will get the node's hostname and append port as the currentNode;
+## 3. not set, then kylin will get the node hostname address and set the hostname and defaultPort(7070 for coordinator or 9090 for receiver) as the currentNode.
+##kylin.stream.node=
+#
+## Auto resubmit after job be discarded
+#kylin.stream.auto-resubmit-after-discard-enabled=true
diff --git a/kubernetes/config/production/kylin-query/kylin_hive_conf.xml b/kubernetes/config/production/kylin-query/kylin_hive_conf.xml
new file mode 100644
index 0000000..f01d08e
--- /dev/null
+++ b/kubernetes/config/production/kylin-query/kylin_hive_conf.xml
@@ -0,0 +1,102 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+    <property>
+        <name>dfs.replication</name>
+        <value>2</value>
+        <description>Block replication</description>
+    </property>
+
+    <property>
+        <name>hive.exec.compress.output</name>
+        <value>true</value>
+        <description>Enable compress</description>
+    </property>
+
+    <property>
+        <name>hive.auto.convert.join</name>
+        <value>true</value>
+        <description>Enables the optimization about converting common join into mapjoin</description>
+    </property>
+
+    <property>
+        <name>hive.auto.convert.join.noconditionaltask</name>
+        <value>true</value>
+        <description>enable map-side join</description>
+    </property>
+
+    <property>
+        <name>hive.auto.convert.join.noconditionaltask.size</name>
+        <value>100000000</value>
+        <description>enable map-side join</description>
+    </property>
+
+    <!--
+    The default map outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.map.output.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description></description>
+    </property>
+    -->
+    <!--
+    The default job outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description></description>
+    </property>
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.type</name>
+        <value>BLOCK</value>
+        <description>The compression type to use for job outputs</description>
+    </property>
+
+    -->
+    <property>
+        <name>mapreduce.job.split.metainfo.maxsize</name>
+        <value>-1</value>
+        <description>The maximum permissible size of the split metainfo file.
+            The JobTracker won't attempt to read split metainfo files bigger than
+            the configured value. No limits if set to -1.
+        </description>
+    </property>
+
+    <property>
+        <name>hive.stats.autogather</name>
+        <value>true</value>
+        <description>Collect statistics for newly created intermediate table</description>
+    </property>
+
+    <property>
+        <name>hive.merge.mapfiles</name>
+        <value>false</value>
+        <description>Disable Hive's auto merge</description>
+    </property>
+
+    <property>
+        <name>hive.merge.mapredfiles</name>
+        <value>false</value>
+        <description>Disable Hive's auto merge</description>
+    </property>
+</configuration>
diff --git a/kubernetes/config/production/kylin-query/kylin_job_conf.xml b/kubernetes/config/production/kylin-query/kylin_job_conf.xml
new file mode 100644
index 0000000..17a9145
--- /dev/null
+++ b/kubernetes/config/production/kylin-query/kylin_job_conf.xml
@@ -0,0 +1,88 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+
+    <property>
+        <name>mapreduce.job.split.metainfo.maxsize</name>
+        <value>-1</value>
+        <description>The maximum permissible size of the split metainfo file.
+            The JobTracker won't attempt to read split metainfo files bigger than
+            the configured value. No limits if set to -1.
+        </description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.output.compress</name>
+        <value>true</value>
+        <description>Compress map outputs</description>
+    </property>
+
+    <!--
+    The default map outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.map.output.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for map outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress</name>
+        <value>true</value>
+        <description>Compress the output of a MapReduce job</description>
+    </property>
+    <!--
+    The default job outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for job outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.type</name>
+        <value>BLOCK</value>
+        <description>The compression type to use for job outputs</description>
+    </property>
+
+
+    <property>
+        <name>mapreduce.job.max.split.locations</name>
+        <value>2000</value>
+        <description>No description</description>
+    </property>
+
+    <property>
+        <name>dfs.replication</name>
+        <value>2</value>
+        <description>Block replication</description>
+    </property>
+
+    <property>
+        <name>mapreduce.task.timeout</name>
+        <value>3600000</value>
+        <description>Set task timeout to 1 hour</description>
+    </property>
+
+</configuration>
diff --git a/kubernetes/config/production/kylin-query/kylin_job_conf_cube_merge.xml b/kubernetes/config/production/kylin-query/kylin_job_conf_cube_merge.xml
new file mode 100644
index 0000000..79365ad
--- /dev/null
+++ b/kubernetes/config/production/kylin-query/kylin_job_conf_cube_merge.xml
@@ -0,0 +1,104 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+    <property>
+        <name>mapreduce.job.split.metainfo.maxsize</name>
+        <value>-1</value>
+        <description>The maximum permissible size of the split metainfo file.
+            The JobTracker won't attempt to read split metainfo files bigger than
+            the configured value. No limits if set to -1.
+        </description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.output.compress</name>
+        <value>true</value>
+        <description>Compress map outputs</description>
+    </property>
+
+    <!--
+    The default map outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.map.output.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for map outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress</name>
+        <value>true</value>
+        <description>Compress the output of a MapReduce job</description>
+    </property>
+    <!--
+    The default job outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for job outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.type</name>
+        <value>BLOCK</value>
+        <description>The compression type to use for job outputs</description>
+    </property>
+
+    <property>
+        <name>mapreduce.job.max.split.locations</name>
+        <value>2000</value>
+        <description>No description</description>
+    </property>
+
+    <property>
+        <name>dfs.replication</name>
+        <value>2</value>
+        <description>Block replication</description>
+    </property>
+
+    <property>
+        <name>mapreduce.task.timeout</name>
+        <value>7200000</value>
+        <description>Set task timeout to 2 hours</description>
+    </property>
+
+    <!--Additional config for cube merge job, giving more memory -->
+    <property>
+        <name>mapreduce.map.memory.mb</name>
+        <value>3072</value>
+        <description></description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.java.opts</name>
+        <value>-Xmx2700m -XX:OnOutOfMemoryError='kill -9 %p'</value>
+        <description></description>
+    </property>
+
+    <property>
+        <name>mapreduce.task.io.sort.mb</name>
+        <value>200</value>
+        <description></description>
+    </property>
+</configuration>
diff --git a/kubernetes/config/production/kylin-query/kylin_job_conf_inmem.xml b/kubernetes/config/production/kylin-query/kylin_job_conf_inmem.xml
new file mode 100644
index 0000000..ddda4dd
--- /dev/null
+++ b/kubernetes/config/production/kylin-query/kylin_job_conf_inmem.xml
@@ -0,0 +1,111 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+    <property>
+        <name>mapreduce.job.is-mem-hungry</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>mapreduce.job.split.metainfo.maxsize</name>
+        <value>-1</value>
+        <description>The maximum permissible size of the split metainfo file.
+            The JobTracker won't attempt to read split metainfo files bigger than
+            the configured value. No limits if set to -1.
+        </description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.output.compress</name>
+        <value>true</value>
+        <description>Compress map outputs</description>
+    </property>
+
+    <!--
+    The default map outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.map.output.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for map outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress</name>
+        <value>true</value>
+        <description>Compress the output of a MapReduce job</description>
+    </property>
+    <!--
+    The default job outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for job outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.type</name>
+        <value>BLOCK</value>
+        <description>The compression type to use for job outputs</description>
+    </property>
+
+
+    <property>
+        <name>mapreduce.job.max.split.locations</name>
+        <value>2000</value>
+        <description>No description</description>
+    </property>
+
+    <property>
+        <name>dfs.replication</name>
+        <value>2</value>
+        <description>Block replication</description>
+    </property>
+
+    <property>
+        <name>mapreduce.task.timeout</name>
+        <value>7200000</value>
+        <description>Set task timeout to 2 hours</description>
+    </property>
+
+    <!--Additional config for in-mem cubing, giving mapper more memory -->
+    <property>
+        <name>mapreduce.map.memory.mb</name>
+        <value>3072</value>
+        <description></description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.java.opts</name>
+        <value>-Xmx2700m -XX:OnOutOfMemoryError='kill -9 %p'</value>
+        <description></description>
+    </property>
+
+    <property>
+        <name>mapreduce.task.io.sort.mb</name>
+        <value>200</value>
+        <description></description>
+    </property>
+
+</configuration>
diff --git a/kubernetes/config/production/kylin-query/setenv-tool.sh b/kubernetes/config/production/kylin-query/setenv-tool.sh
new file mode 100644
index 0000000..487b5ef
--- /dev/null
+++ b/kubernetes/config/production/kylin-query/setenv-tool.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# source me
+
+# (if you're deploying KYLIN on a powerful server and want to replace the default conservative settings)
+# uncomment the following for it to take effect
+export KYLIN_JVM_SETTINGS="-Xms1024M -Xmx4096M -Xss1024K -XX:MaxPermSize=512M -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.$$ -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=64M"
+
+# Newer versions of glibc use an arena memory allocator that causes virtual
+# memory usage to explode. Tune the variable down to prevent vmem explosion.
+# See HADOOP-7154.
+export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
+
+# export KYLIN_JVM_SETTINGS="-Xms16g -Xmx16g -XX:MaxPermSize=512m -XX:NewSize=3g -XX:MaxNewSize=3g -XX:SurvivorRatio=4 -XX:+CMSClassUnloadingEnabled -XX:+CMSParallelRemarkEnabled -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:+DisableExplicitGC -XX:+HeapDumpOnOutOfMemoryError -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.$$ -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFi [...]
+
+# uncomment following to for it to take effect(the values need adjusting to fit your env)
+# export KYLIN_DEBUG_SETTINGS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
+
+# when running on HDP, try to determine the software stack version and set the hdp.version JVM property
+if [[ -d "/usr/hdp/current/hadoop-client" ]]
+then
+   export KYLIN_EXTRA_START_OPTS="-Dhdp.version=`ls -l /usr/hdp/current/hadoop-client | awk -F'/' '{print $8}'`"
+   # attempt to locate JVM native libraries and set corresponding property
+   if [[ -d "/usr/hdp/current/hadoop-client/lib/native" ]]
+   then
+      export KYLIN_LD_LIBRARY_SETTINGS="-Djava.library.path=/usr/hdp/current/hadoop-client/lib/native"
+   fi
+else
+   export KYLIN_EXTRA_START_OPTS=""
+   # uncomment the following line to set JVM native library path, the values need to reflect your environment and hardware architecture
+   # export KYLIN_LD_LIBRARY_SETTINGS="-Djava.library.path=/apache/hadoop/lib/native/Linux-amd64-64"
+fi
+
+if [ ! -z "${KYLIN_JVM_SETTINGS}" ]
+then
+    verbose "KYLIN_JVM_SETTINGS is ${KYLIN_JVM_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_JVM_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_JVM_SETTINGS is not set, using default jvm settings: ${KYLIN_JVM_SETTINGS}"
+fi
+
+if [ ! -z "${KYLIN_DEBUG_SETTINGS}" ]
+then
+    verbose "KYLIN_DEBUG_SETTINGS is ${KYLIN_DEBUG_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_DEBUG_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_DEBUG_SETTINGS is not set, will not enable remote debuging"
+fi
+
+if [ ! -z "${KYLIN_LD_LIBRARY_SETTINGS}" ]
+then
+    verbose "KYLIN_LD_LIBRARY_SETTINGS is ${KYLIN_LD_LIBRARY_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_LD_LIBRARY_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_LD_LIBRARY_SETTINGS is not set, it is okay unless you want to specify your own native path"
+fi
diff --git a/kubernetes/config/production/kylin-query/setenv.sh b/kubernetes/config/production/kylin-query/setenv.sh
new file mode 100644
index 0000000..fa88769
--- /dev/null
+++ b/kubernetes/config/production/kylin-query/setenv.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# source me
+
+# (if you're deploying KYLIN on a powerful server and want to replace the default conservative settings)
+# uncomment the following for it to take effect
+export KYLIN_JVM_SETTINGS="-Xms1024M -Xmx2048M -Xss1024K -XX:MaxPermSize=512M -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.%p -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=64M"
+
+# Newer versions of glibc use an arena memory allocator that causes virtual
+# memory usage to explode. Tune the variable down to prevent vmem explosion.
+# See HADOOP-7154.
+export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
+
+# export KYLIN_JVM_SETTINGS="-Xms16g -Xmx16g -XX:MaxPermSize=512m -XX:NewSize=3g -XX:MaxNewSize=3g -XX:SurvivorRatio=4 -XX:+CMSClassUnloadingEnabled -XX:+CMSParallelRemarkEnabled -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:+DisableExplicitGC -XX:+HeapDumpOnOutOfMemoryError -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.%p -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFi [...]
+
+# uncomment following to for it to take effect(the values need adjusting to fit your env)
+# export KYLIN_DEBUG_SETTINGS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
+
+# when running on HDP, try to determine the software stack version and set the hdp.version JVM property
+if [[ -d "/usr/hdp/current/hadoop-client" ]]
+then
+   export KYLIN_EXTRA_START_OPTS="-Dhdp.version=`ls -l /usr/hdp/current/hadoop-client | awk -F'/' '{print $8}'`"
+   # attempt to locate JVM native libraries and set corresponding property
+   if [[ -d "/usr/hdp/current/hadoop-client/lib/native" ]]
+   then
+      export KYLIN_LD_LIBRARY_SETTINGS="-Djava.library.path=/usr/hdp/current/hadoop-client/lib/native"
+   fi
+else
+   export KYLIN_EXTRA_START_OPTS=""
+   # uncomment the following line to set JVM native library path, the values need to reflect your environment and hardware architecture
+   # export KYLIN_LD_LIBRARY_SETTINGS="-Djava.library.path=/apache/hadoop/lib/native/Linux-amd64-64"
+fi
+
+if [ ! -z "${KYLIN_JVM_SETTINGS}" ]
+then
+    verbose "KYLIN_JVM_SETTINGS is ${KYLIN_JVM_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_JVM_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_JVM_SETTINGS is not set, using default jvm settings: ${KYLIN_JVM_SETTINGS}"
+fi
+
+if [ ! -z "${KYLIN_DEBUG_SETTINGS}" ]
+then
+    verbose "KYLIN_DEBUG_SETTINGS is ${KYLIN_DEBUG_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_DEBUG_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_DEBUG_SETTINGS is not set, will not enable remote debuging"
+fi
+
+if [ ! -z "${KYLIN_LD_LIBRARY_SETTINGS}" ]
+then
+    verbose "KYLIN_LD_LIBRARY_SETTINGS is ${KYLIN_LD_LIBRARY_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_LD_LIBRARY_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_LD_LIBRARY_SETTINGS is not set, it is okay unless you want to specify your own native path"
+fi
diff --git a/kubernetes/config/production/streaming-receiver/kylin.properties b/kubernetes/config/production/streaming-receiver/kylin.properties
new file mode 100644
index 0000000..2a6d988
--- /dev/null
+++ b/kubernetes/config/production/streaming-receiver/kylin.properties
@@ -0,0 +1,413 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+
+# The below commented values will effect as default settings
+# Uncomment and override them if necessary
+
+
+
+#
+#### METADATA | ENV ###
+#
+## The metadata store in hbase
+kylin.metadata.url=kylin_metadata_k8s_prod@hbase
+#
+## metadata cache sync retry times
+#kylin.metadata.sync-retries=3
+#
+## Working folder in HDFS, better be qualified absolute path, make sure user has the right permission to this directory
+#kylin.env.hdfs-working-dir=/kylin
+#
+## DEV|QA|PROD. DEV will turn on some dev features, QA and PROD has no difference in terms of functions.
+#kylin.env=QA
+#
+## kylin zk base path
+#kylin.env.zookeeper-base-path=/kylin
+#
+#### SERVER | WEB | RESTCLIENT ###
+#
+## Kylin server mode, valid value [all, query, job]
+#kylin.server.mode=all
+#
+## List of web servers in use, this enables one web server instance to sync up with other servers.
+#kylin.server.cluster-servers=localhost:7070
+#
+## Display timezone on UI,format like[GMT+N or GMT-N]
+#kylin.web.timezone=
+#
+## Timeout value for the queries submitted through the Web UI, in milliseconds
+#kylin.web.query-timeout=300000
+#
+#kylin.web.cross-domain-enabled=true
+#
+##allow user to export query result
+#kylin.web.export-allow-admin=true
+#kylin.web.export-allow-other=true
+#
+## Hide measures in measure list of cube designer, separate by comma
+#kylin.web.hide-measures=RAW
+#
+##max connections of one route
+#kylin.restclient.connection.default-max-per-route=20
+#
+##max connections of one rest-client
+#kylin.restclient.connection.max-total=200
+#
+#### PUBLIC CONFIG ###
+#kylin.engine.default=2
+#kylin.storage.default=2
+#kylin.web.hive-limit=20
+#kylin.web.help.length=4
+#kylin.web.help.0=start|Getting Started|http://kylin.apache.org/docs/tutorial/kylin_sample.html
+#kylin.web.help.1=odbc|ODBC Driver|http://kylin.apache.org/docs/tutorial/odbc.html
+#kylin.web.help.2=tableau|Tableau Guide|http://kylin.apache.org/docs/tutorial/tableau_91.html
+#kylin.web.help.3=onboard|Cube Design Tutorial|http://kylin.apache.org/docs/howto/howto_optimize_cubes.html
+#kylin.web.link-streaming-guide=http://kylin.apache.org/
+#kylin.htrace.show-gui-trace-toggle=false
+#kylin.web.link-hadoop=
+#kylin.web.link-diagnostic=
+#kylin.web.contact-mail=
+#kylin.server.external-acl-provider=
+#
+## Default time filter for job list, 0->current day, 1->last one day, 2->last one week, 3->last one year, 4->all
+#kylin.web.default-time-filter=1
+#
+#### SOURCE ###
+#
+## Hive client, valid value [cli, beeline]
+#kylin.source.hive.client=cli
+#
+## Absolute path to beeline shell, can be set to spark beeline instead of the default hive beeline on PATH
+#kylin.source.hive.beeline-shell=beeline
+#
+## Parameters for beeline client, only necessary if hive client is beeline
+##kylin.source.hive.beeline-params=-n root --hiveconf hive.security.authorization.sqlstd.confwhitelist.append='mapreduce.job.*|dfs.*' -u jdbc:hive2://localhost:10000
+#
+## While hive client uses above settings to read hive table metadata,
+## table operations can go through a separate SparkSQL command line, given SparkSQL connects to the same Hive metastore.
+#kylin.source.hive.enable-sparksql-for-table-ops=false
+##kylin.source.hive.sparksql-beeline-shell=/path/to/spark-client/bin/beeline
+##kylin.source.hive.sparksql-beeline-params=-n root --hiveconf hive.security.authorization.sqlstd.confwhitelist.append='mapreduce.job.*|dfs.*' -u jdbc:hive2://localhost:10000
+#
+#kylin.source.hive.keep-flat-table=false
+#
+## Hive database name for putting the intermediate flat tables
+#kylin.source.hive.database-for-flat-table=default
+#
+## Whether redistribute the intermediate flat table before building
+#kylin.source.hive.redistribute-flat-table=true
+#
+#
+#### STORAGE ###
+#
+## The storage for final cube file in hbase
+#kylin.storage.url=hbase
+#
+## The prefix of hbase table
+#kylin.storage.hbase.table-name-prefix=KYLIN_
+#
+## The namespace for hbase storage
+#kylin.storage.hbase.namespace=default
+#
+## Compression codec for htable, valid value [none, snappy, lzo, gzip, lz4]
+#kylin.storage.hbase.compression-codec=none
+#
+## HBase Cluster FileSystem, which serving hbase, format as hdfs://hbase-cluster:8020
+## Leave empty if hbase running on same cluster with hive and mapreduce
+##kylin.storage.hbase.cluster-fs=
+#
+## The cut size for hbase region, in GB.
+#kylin.storage.hbase.region-cut-gb=5
+#
+## The hfile size of GB, smaller hfile leading to the converting hfile MR has more reducers and be faster.
+## Set 0 to disable this optimization.
+#kylin.storage.hbase.hfile-size-gb=2
+#
+#kylin.storage.hbase.min-region-count=1
+#kylin.storage.hbase.max-region-count=500
+#
+## Optional information for the owner of kylin platform, it can be your team's email
+## Currently it will be attached to each kylin's htable attribute
+#kylin.storage.hbase.owner-tag=whoami@kylin.apache.org
+#
+#kylin.storage.hbase.coprocessor-mem-gb=3
+#
+## By default kylin can spill query's intermediate results to disks when it's consuming too much memory.
+## Set it to false if you want query to abort immediately in such condition.
+#kylin.storage.partition.aggr-spill-enabled=true
+#
+## The maximum number of bytes each coprocessor is allowed to scan.
+## To allow arbitrary large scan, you can set it to 0.
+#kylin.storage.partition.max-scan-bytes=3221225472
+#
+## The default coprocessor timeout is (hbase.rpc.timeout * 0.9) / 1000 seconds,
+## You can set it to a smaller value. 0 means use default.
+## kylin.storage.hbase.coprocessor-timeout-seconds=0
+#
+## clean real storage after delete operation
+## if you want to delete the real storage like htable of deleting segment, you can set it to true
+#kylin.storage.clean-after-delete-operation=false
+#
+#### JOB ###
+#
+## Max job retry on error, default 0: no retry
+#kylin.job.retry=0
+#
+## Max count of concurrent jobs running
+#kylin.job.max-concurrent-jobs=10
+#
+## The percentage of the sampling, default 100%
+#kylin.job.sampling-percentage=100
+#
+## If true, will send email notification on job complete
+##kylin.job.notification-enabled=true
+##kylin.job.notification-mail-enable-starttls=true
+##kylin.job.notification-mail-host=smtp.office365.com
+##kylin.job.notification-mail-port=587
+##kylin.job.notification-mail-username=kylin@example.com
+##kylin.job.notification-mail-password=mypassword
+##kylin.job.notification-mail-sender=kylin@example.com
+#kylin.job.scheduler.provider.100=org.apache.kylin.job.impl.curator.CuratorScheduler
+#kylin.job.scheduler.default=0
+#
+#### ENGINE ###
+#
+## Time interval to check hadoop job status
+#kylin.engine.mr.yarn-check-interval-seconds=10
+#
+#kylin.engine.mr.reduce-input-mb=500
+#
+#kylin.engine.mr.max-reducer-number=500
+#
+#kylin.engine.mr.mapper-input-rows=1000000
+#
+## Enable dictionary building in MR reducer
+#kylin.engine.mr.build-dict-in-reducer=true
+#
+## Number of reducers for fetching UHC column distinct values
+#kylin.engine.mr.uhc-reducer-count=3
+#
+## Whether using an additional step to build UHC dictionary
+#kylin.engine.mr.build-uhc-dict-in-additional-step=false
+#
+#
+#### CUBE | DICTIONARY ###
+#
+#kylin.cube.cuboid-scheduler=org.apache.kylin.cube.cuboid.DefaultCuboidScheduler
+#kylin.cube.segment-advisor=org.apache.kylin.cube.CubeSegmentAdvisor
+#
+## 'auto', 'inmem', 'layer' or 'random' for testing 
+#kylin.cube.algorithm=layer
+#
+## A smaller threshold prefers layer, a larger threshold prefers in-mem
+#kylin.cube.algorithm.layer-or-inmem-threshold=7
+#
+## auto use inmem algorithm:
+## 1, cube planner optimize job
+## 2, no source record
+#kylin.cube.algorithm.inmem-auto-optimize=true
+#
+#kylin.cube.aggrgroup.max-combination=32768
+#
+#kylin.snapshot.max-mb=300
+#
+#kylin.cube.cubeplanner.enabled=true
+#kylin.cube.cubeplanner.enabled-for-existing-cube=true
+#kylin.cube.cubeplanner.expansion-threshold=15.0
+#kylin.cube.cubeplanner.recommend-cache-max-size=200
+#kylin.cube.cubeplanner.mandatory-rollup-threshold=1000
+#kylin.cube.cubeplanner.algorithm-threshold-greedy=8
+#kylin.cube.cubeplanner.algorithm-threshold-genetic=23
+#
+#
+#### QUERY ###
+#
+## Controls the maximum number of bytes a query is allowed to scan storage.
+## The default value 0 means no limit.
+## The counterpart kylin.storage.partition.max-scan-bytes sets the maximum per coprocessor.
+#kylin.query.max-scan-bytes=0
+#
+#kylin.query.cache-enabled=true
+#
+## Controls extras properties for Calcite jdbc driver
+## all extras properties should be under the prefix "kylin.query.calcite.extras-props."
+## case sensitive, default: true, to enable case insensitive set it to false
+## @see org.apache.calcite.config.CalciteConnectionProperty.CASE_SENSITIVE
+#kylin.query.calcite.extras-props.caseSensitive=true
+## how to handle unquoted identifiers, default: TO_UPPER, available options: UNCHANGED, TO_UPPER, TO_LOWER
+## @see org.apache.calcite.config.CalciteConnectionProperty.UNQUOTED_CASING
+#kylin.query.calcite.extras-props.unquotedCasing=TO_UPPER
+## quoting method, default: DOUBLE_QUOTE, available options: DOUBLE_QUOTE, BACK_TICK, BRACKET
+## @see org.apache.calcite.config.CalciteConnectionProperty.QUOTING
+#kylin.query.calcite.extras-props.quoting=DOUBLE_QUOTE
+## change SqlConformance from DEFAULT to LENIENT to enable group by ordinal
+## @see org.apache.calcite.sql.validate.SqlConformance.SqlConformanceEnum
+#kylin.query.calcite.extras-props.conformance=LENIENT
+#
+## TABLE ACL
+#kylin.query.security.table-acl-enabled=true
+#
+## Usually should not modify this
+#kylin.query.interceptors=org.apache.kylin.rest.security.TableInterceptor
+#
+#kylin.query.escape-default-keyword=false
+#
+## Usually should not modify this
+#kylin.query.transformers=org.apache.kylin.query.util.DefaultQueryTransformer,org.apache.kylin.query.util.KeywordDefaultDirtyHack
+#
+#### SECURITY ###
+#
+## Spring security profile, options: testing, ldap, saml
+## with "testing" profile, user can use pre-defined name/pwd like KYLIN/ADMIN to login
+#kylin.security.profile=testing
+#
+## Admin roles in LDAP, for ldap and saml
+#kylin.security.acl.admin-role=admin
+#
+## LDAP authentication configuration
+#kylin.security.ldap.connection-server=ldap://ldap_server:389
+#kylin.security.ldap.connection-username=
+#kylin.security.ldap.connection-password=
+#
+## LDAP user account directory;
+#kylin.security.ldap.user-search-base=
+#kylin.security.ldap.user-search-pattern=
+#kylin.security.ldap.user-group-search-base=
+#kylin.security.ldap.user-group-search-filter=(|(member={0})(memberUid={1}))
+#
+## LDAP service account directory
+#kylin.security.ldap.service-search-base=
+#kylin.security.ldap.service-search-pattern=
+#kylin.security.ldap.service-group-search-base=
+#
+### SAML configurations for SSO
+## SAML IDP metadata file location
+#kylin.security.saml.metadata-file=classpath:sso_metadata.xml
+#kylin.security.saml.metadata-entity-base-url=https://hostname/kylin
+#kylin.security.saml.keystore-file=classpath:samlKeystore.jks
+#kylin.security.saml.context-scheme=https
+#kylin.security.saml.context-server-name=hostname
+#kylin.security.saml.context-server-port=443
+#kylin.security.saml.context-path=/kylin
+#
+#### SPARK ENGINE CONFIGS ###
+#
+## Hadoop conf folder, will export this as "HADOOP_CONF_DIR" to run spark-submit
+## This must contain site xmls of core, yarn, hive, and hbase in one folder
+##kylin.env.hadoop-conf-dir=/etc/hadoop/conf
+#
+## Estimate the RDD partition numbers
+#kylin.engine.spark.rdd-partition-cut-mb=10
+#
+## Minimal partition numbers of rdd
+#kylin.engine.spark.min-partition=1
+#
+## Max partition numbers of rdd
+#kylin.engine.spark.max-partition=5000
+#
+## Spark conf (default is in spark/conf/spark-defaults.conf)
+#kylin.engine.spark-conf.spark.master=yarn
+##kylin.engine.spark-conf.spark.submit.deployMode=cluster
+#kylin.engine.spark-conf.spark.yarn.queue=default
+#kylin.engine.spark-conf.spark.driver.memory=2G
+#kylin.engine.spark-conf.spark.executor.memory=4G
+#kylin.engine.spark-conf.spark.executor.instances=40
+#kylin.engine.spark-conf.spark.yarn.executor.memoryOverhead=1024
+#kylin.engine.spark-conf.spark.shuffle.service.enabled=true
+#kylin.engine.spark-conf.spark.eventLog.enabled=true
+#kylin.engine.spark-conf.spark.eventLog.dir=hdfs\:///kylin/spark-history
+#kylin.engine.spark-conf.spark.history.fs.logDirectory=hdfs\:///kylin/spark-history
+#kylin.engine.spark-conf.spark.hadoop.yarn.timeline-service.enabled=false
+#
+#### Spark conf for specific job
+#kylin.engine.spark-conf-mergedict.spark.executor.memory=6G
+#kylin.engine.spark-conf-mergedict.spark.memory.fraction=0.2
+#
+## manually upload spark-assembly jar to HDFS and then set this property will avoid repeatedly uploading jar at runtime
+##kylin.engine.spark-conf.spark.yarn.archive=hdfs://namenode:8020/kylin/spark/spark-libs.jar
+##kylin.engine.spark-conf.spark.io.compression.codec=org.apache.spark.io.SnappyCompressionCodec
+#
+## uncomment for HDP
+##kylin.engine.spark-conf.spark.driver.extraJavaOptions=-Dhdp.version=current
+##kylin.engine.spark-conf.spark.yarn.am.extraJavaOptions=-Dhdp.version=current
+##kylin.engine.spark-conf.spark.executor.extraJavaOptions=-Dhdp.version=current
+#
+#
+#### QUERY PUSH DOWN ###
+#
+##kylin.query.pushdown.runner-class-name=org.apache.kylin.query.adhoc.PushDownRunnerJdbcImpl
+#
+##kylin.query.pushdown.update-enabled=false
+##kylin.query.pushdown.jdbc.url=jdbc:hive2://sandbox:10000/default
+##kylin.query.pushdown.jdbc.driver=org.apache.hive.jdbc.HiveDriver
+##kylin.query.pushdown.jdbc.username=hive
+##kylin.query.pushdown.jdbc.password=
+#
+##kylin.query.pushdown.jdbc.pool-max-total=8
+##kylin.query.pushdown.jdbc.pool-max-idle=8
+##kylin.query.pushdown.jdbc.pool-min-idle=0
+#
+#### JDBC Data Source
+##kylin.source.jdbc.connection-url=
+##kylin.source.jdbc.driver=
+##kylin.source.jdbc.dialect=
+##kylin.source.jdbc.user=
+##kylin.source.jdbc.pass=
+##kylin.source.jdbc.sqoop-home=
+##kylin.source.jdbc.filed-delimiter=|
+#
+#### Livy with Kylin
+##kylin.engine.livy-conf.livy-enabled=false
+##kylin.engine.livy-conf.livy-url=http://LivyHost:8998
+##kylin.engine.livy-conf.livy-key.file=hdfs:///path-to-kylin-job-jar
+##kylin.engine.livy-conf.livy-arr.jars=hdfs:///path-to-hadoop-dependency-jar
+#
+#
+#### Realtime OLAP ###
+#
+## Where should local segment cache located, for absolute path, the real path will be ${KYLIN_HOME}/${kylin.stream.index.path}
+kylin.stream.index.path=stream_index
+#
+## The timezone for Derived Time Column like hour_start, try set to GMT+N, please check detail at KYLIN-4010
+kylin.stream.event.timezone=GMT+8
+#
+## Debug switch for print realtime global dict encode information, please check detail at KYLIN-4141
+#kylin.stream.print-realtime-dict-enabled=false
+#
+## Should enable latest coordinator, please check detail at KYLIN-4167
+#kylin.stream.new.coordinator-enabled=true
+#
+## In which way should we collect receiver's metrics info
+kylin.stream.metrics.option=console
+#
+## When enabling a streaming cube, should consume from earliest offset or latest offset
+#kylin.stream.consume.offsets.latest=true
+#
+## The parallelism of scan in receiver side
+#kylin.stream.receiver.use-threads-per-query=8
+#
+## How coordinator/receiver register itself into StreamMetadata, there are three option:
+## 1. hostname:port, then kylin will set the config ip and port as the currentNode;
+## 2. port, then kylin will get the node's hostname and append port as the currentNode;
+## 3. not set, then kylin will get the node hostname address and set the hostname and defaultPort(7070 for coordinator or 9090 for receiver) as the currentNode.
+##kylin.stream.node=
+#
+## Auto resubmit after job be discarded
+#kylin.stream.auto-resubmit-after-discard-enabled=true
diff --git a/kubernetes/config/production/streaming-receiver/setenv.sh b/kubernetes/config/production/streaming-receiver/setenv.sh
new file mode 100644
index 0000000..fa88769
--- /dev/null
+++ b/kubernetes/config/production/streaming-receiver/setenv.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# source me
+
+# (if you're deploying KYLIN on a powerful server and want to replace the default conservative settings)
+# uncomment the following for it to take effect
+export KYLIN_JVM_SETTINGS="-Xms1024M -Xmx2048M -Xss1024K -XX:MaxPermSize=512M -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.%p -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=64M"
+
+# Newer versions of glibc use an arena memory allocator that causes virtual
+# memory usage to explode. Tune the variable down to prevent vmem explosion.
+# See HADOOP-7154.
+export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
+
+# export KYLIN_JVM_SETTINGS="-Xms16g -Xmx16g -XX:MaxPermSize=512m -XX:NewSize=3g -XX:MaxNewSize=3g -XX:SurvivorRatio=4 -XX:+CMSClassUnloadingEnabled -XX:+CMSParallelRemarkEnabled -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:+DisableExplicitGC -XX:+HeapDumpOnOutOfMemoryError -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.%p -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFi [...]
+
+# uncomment the following for it to take effect (the values need adjusting to fit your env)
+# export KYLIN_DEBUG_SETTINGS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
+
+# when running on HDP, try to determine the software stack version and set hdp.version JVM property
+if [[ -d "/usr/hdp/current/hadoop-client" ]]
+then
+   export KYLIN_EXTRA_START_OPTS="-Dhdp.version=`ls -l /usr/hdp/current/hadoop-client | awk -F'/' '{print $8}'`"
+   # attempt to locate JVM native libraries and set corresponding property
+   if [[ -d "/usr/hdp/current/hadoop-client/lib/native" ]]
+   then
+      export KYLIN_LD_LIBRARY_SETTINGS="-Djava.library.path=/usr/hdp/current/hadoop-client/lib/native"
+   fi
+else
+   export KYLIN_EXTRA_START_OPTS=""
+   # uncomment the following line to set JVM native library path, the values need to reflect your environment and hardware architecture
+   # export KYLIN_LD_LIBRARY_SETTINGS="-Djava.library.path=/apache/hadoop/lib/native/Linux-amd64-64"
+fi
+
+if [ ! -z "${KYLIN_JVM_SETTINGS}" ]
+then
+    verbose "KYLIN_JVM_SETTINGS is ${KYLIN_JVM_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_JVM_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_JVM_SETTINGS is not set, using default jvm settings: ${KYLIN_JVM_SETTINGS}"
+fi
+
+if [ ! -z "${KYLIN_DEBUG_SETTINGS}" ]
+then
+    verbose "KYLIN_DEBUG_SETTINGS is ${KYLIN_DEBUG_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_DEBUG_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_DEBUG_SETTINGS is not set, will not enable remote debugging"
+fi
+
+if [ ! -z "${KYLIN_LD_LIBRARY_SETTINGS}" ]
+then
+    verbose "KYLIN_LD_LIBRARY_SETTINGS is ${KYLIN_LD_LIBRARY_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_LD_LIBRARY_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_LD_LIBRARY_SETTINGS is not set, it is okay unless you want to specify your own native path"
+fi
diff --git a/kubernetes/config/quickstart/hadoop/core-site.xml b/kubernetes/config/quickstart/hadoop/core-site.xml
new file mode 100644
index 0000000..9108ad8
--- /dev/null
+++ b/kubernetes/config/quickstart/hadoop/core-site.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<configuration>
+</configuration>
diff --git a/kubernetes/config/quickstart/hadoop/hbase-site.xml b/kubernetes/config/quickstart/hadoop/hbase-site.xml
new file mode 100644
index 0000000..9108ad8
--- /dev/null
+++ b/kubernetes/config/quickstart/hadoop/hbase-site.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<configuration>
+</configuration>
diff --git a/kubernetes/config/quickstart/hadoop/hdfs-site.xml b/kubernetes/config/quickstart/hadoop/hdfs-site.xml
new file mode 100644
index 0000000..9108ad8
--- /dev/null
+++ b/kubernetes/config/quickstart/hadoop/hdfs-site.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<configuration>
+</configuration>
diff --git a/kubernetes/config/quickstart/hadoop/hive-site.xml b/kubernetes/config/quickstart/hadoop/hive-site.xml
new file mode 100644
index 0000000..9108ad8
--- /dev/null
+++ b/kubernetes/config/quickstart/hadoop/hive-site.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<configuration>
+</configuration>
diff --git a/kubernetes/config/quickstart/hadoop/mapred-site.xml b/kubernetes/config/quickstart/hadoop/mapred-site.xml
new file mode 100644
index 0000000..9108ad8
--- /dev/null
+++ b/kubernetes/config/quickstart/hadoop/mapred-site.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<configuration>
+</configuration>
diff --git a/kubernetes/config/quickstart/hadoop/yarn-site.xml b/kubernetes/config/quickstart/hadoop/yarn-site.xml
new file mode 100644
index 0000000..9108ad8
--- /dev/null
+++ b/kubernetes/config/quickstart/hadoop/yarn-site.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<configuration>
+</configuration>
diff --git a/kubernetes/config/quickstart/kylin/kylin-kafka-consumer.xml b/kubernetes/config/quickstart/kylin/kylin-kafka-consumer.xml
new file mode 100644
index 0000000..8529a41
--- /dev/null
+++ b/kubernetes/config/quickstart/kylin/kylin-kafka-consumer.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<!--
+ for more kafka consumer configs, please refer to http://kafka.apache.org/documentation#consumerconfigs
+-->
+<configuration>
+    <property>
+        <name>session.timeout.ms</name>
+        <value>10000</value>
+    </property>
+    <property>
+        <name>request.timeout.ms</name>
+        <value>20000</value>
+    </property>
+</configuration>
\ No newline at end of file
diff --git a/k8s/developments/config/kylin/kylin-server-log4j.properties b/kubernetes/config/quickstart/kylin/kylin-server-log4j.properties
similarity index 100%
copy from k8s/developments/config/kylin/kylin-server-log4j.properties
copy to kubernetes/config/quickstart/kylin/kylin-server-log4j.properties
diff --git a/k8s/developments/config/kylin/kylin-tools-log4j.properties b/kubernetes/config/quickstart/kylin/kylin-spark-log4j.properties
similarity index 51%
copy from k8s/developments/config/kylin/kylin-tools-log4j.properties
copy to kubernetes/config/quickstart/kylin/kylin-spark-log4j.properties
index 54d18c2..948fb32 100644
--- a/k8s/developments/config/kylin/kylin-tools-log4j.properties
+++ b/kubernetes/config/quickstart/kylin/kylin-spark-log4j.properties
@@ -15,24 +15,29 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-
-
-# the kylin-tools-log4j.properties is mainly for configuring log properties on kylin tools, including:
-#   1. tools launched by kylin.sh script, e.g. DeployCoprocessorCLI
-#   2. DebugTomcat
-#   3. others
-#
-# It's called kylin-tools-log4j.properties so that it won't distract users from the other more important log4j config file: kylin-server-log4j.properties
-# enable this by -Dlog4j.configuration=kylin-tools-log4j.properties
-
-log4j.rootLogger=INFO,stderr
+log4j.rootCategory=WARN,stderr,stdout
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.target=System.out
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}:%L : %m%n
 
 log4j.appender.stderr=org.apache.log4j.ConsoleAppender
 log4j.appender.stderr.Target=System.err
 log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
 log4j.appender.stderr.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}:%L : %m%n
 
-#log4j.logger.org.apache.hadoop=ERROR
-log4j.logger.org.apache.kylin=DEBUG
-log4j.logger.org.springframework=WARN
-log4j.logger.org.apache.kylin.tool.shaded=INFO
\ No newline at end of file
+
+# Settings to quiet third party logs that are too verbose
+log4j.logger.org.spark-project.jetty=WARN
+log4j.logger.org.spark-project.jetty.util.component.AbstractLifeCycle=ERROR
+log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO
+log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO
+log4j.logger.org.apache.parquet=ERROR
+log4j.logger.parquet=ERROR
+
+# SPARK-9183: Settings to avoid annoying messages when looking up nonexistent UDFs in SparkSQL with Hive support
+log4j.logger.org.apache.hadoop.hive.metastore.RetryingHMSHandler=FATAL
+log4j.logger.org.apache.hadoop.hive.ql.exec.FunctionRegistry=ERROR
+log4j.logger.org.apache.spark.sql=WARN
+
+log4j.logger.org.apache.kylin=DEBUG
\ No newline at end of file
diff --git a/k8s/developments/config/kylin/kylin-tools-log4j.properties b/kubernetes/config/quickstart/kylin/kylin-tools-log4j.properties
similarity index 100%
copy from k8s/developments/config/kylin/kylin-tools-log4j.properties
copy to kubernetes/config/quickstart/kylin/kylin-tools-log4j.properties
diff --git a/kubernetes/config/quickstart/kylin/kylin.properties b/kubernetes/config/quickstart/kylin/kylin.properties
new file mode 100644
index 0000000..3f48c0b
--- /dev/null
+++ b/kubernetes/config/quickstart/kylin/kylin.properties
@@ -0,0 +1,413 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+
+# The below commented values will effect as default settings
+# Uncomment and override them if necessary
+
+
+
+#
+#### METADATA | ENV ###
+#
+## The metadata store in hbase
+kylin.metadata.url=kylin_metadata_k8s_quickstart@hbase
+#
+## metadata cache sync retry times
+#kylin.metadata.sync-retries=3
+#
+## Working folder in HDFS, better be qualified absolute path, make sure user has the right permission to this directory
+#kylin.env.hdfs-working-dir=/kylin
+#
+## DEV|QA|PROD. DEV will turn on some dev features, QA and PROD has no difference in terms of functions.
+#kylin.env=QA
+#
+## kylin zk base path
+#kylin.env.zookeeper-base-path=/kylin
+#
+#### SERVER | WEB | RESTCLIENT ###
+#
+## Kylin server mode, valid value [all, query, job]
+#kylin.server.mode=all
+#
+## List of web servers in use, this enables one web server instance to sync up with other servers.
+#kylin.server.cluster-servers=localhost:7070
+#
+## Display timezone on UI,format like[GMT+N or GMT-N]
+#kylin.web.timezone=
+#
+## Timeout value for the queries submitted through the Web UI, in milliseconds
+#kylin.web.query-timeout=300000
+#
+#kylin.web.cross-domain-enabled=true
+#
+##allow user to export query result
+#kylin.web.export-allow-admin=true
+#kylin.web.export-allow-other=true
+#
+## Hide measures in measure list of cube designer, separate by comma
+#kylin.web.hide-measures=RAW
+#
+##max connections of one route
+#kylin.restclient.connection.default-max-per-route=20
+#
+##max connections of one rest-client
+#kylin.restclient.connection.max-total=200
+#
+#### PUBLIC CONFIG ###
+#kylin.engine.default=2
+#kylin.storage.default=2
+#kylin.web.hive-limit=20
+#kylin.web.help.length=4
+#kylin.web.help.0=start|Getting Started|http://kylin.apache.org/docs/tutorial/kylin_sample.html
+#kylin.web.help.1=odbc|ODBC Driver|http://kylin.apache.org/docs/tutorial/odbc.html
+#kylin.web.help.2=tableau|Tableau Guide|http://kylin.apache.org/docs/tutorial/tableau_91.html
+#kylin.web.help.3=onboard|Cube Design Tutorial|http://kylin.apache.org/docs/howto/howto_optimize_cubes.html
+#kylin.web.link-streaming-guide=http://kylin.apache.org/
+#kylin.htrace.show-gui-trace-toggle=false
+#kylin.web.link-hadoop=
+#kylin.web.link-diagnostic=
+#kylin.web.contact-mail=
+#kylin.server.external-acl-provider=
+#
+## Default time filter for job list, 0->current day, 1->last one day, 2->last one week, 3->last one year, 4->all
+#kylin.web.default-time-filter=1
+#
+#### SOURCE ###
+#
+## Hive client, valid value [cli, beeline]
+#kylin.source.hive.client=cli
+#
+## Absolute path to beeline shell, can be set to spark beeline instead of the default hive beeline on PATH
+#kylin.source.hive.beeline-shell=beeline
+#
+## Parameters for beeline client, only necessary if hive client is beeline
+##kylin.source.hive.beeline-params=-n root --hiveconf hive.security.authorization.sqlstd.confwhitelist.append='mapreduce.job.*|dfs.*' -u jdbc:hive2://localhost:10000
+#
+## While hive client uses above settings to read hive table metadata,
+## table operations can go through a separate SparkSQL command line, given SparkSQL connects to the same Hive metastore.
+#kylin.source.hive.enable-sparksql-for-table-ops=false
+##kylin.source.hive.sparksql-beeline-shell=/path/to/spark-client/bin/beeline
+##kylin.source.hive.sparksql-beeline-params=-n root --hiveconf hive.security.authorization.sqlstd.confwhitelist.append='mapreduce.job.*|dfs.*' -u jdbc:hive2://localhost:10000
+#
+#kylin.source.hive.keep-flat-table=false
+#
+## Hive database name for putting the intermediate flat tables
+#kylin.source.hive.database-for-flat-table=default
+#
+## Whether redistribute the intermediate flat table before building
+#kylin.source.hive.redistribute-flat-table=true
+#
+#
+#### STORAGE ###
+#
+## The storage for final cube file in hbase
+#kylin.storage.url=hbase
+#
+## The prefix of hbase table
+#kylin.storage.hbase.table-name-prefix=KYLIN_
+#
+## The namespace for hbase storage
+#kylin.storage.hbase.namespace=default
+#
+## Compression codec for htable, valid value [none, snappy, lzo, gzip, lz4]
+#kylin.storage.hbase.compression-codec=none
+#
+## HBase Cluster FileSystem, which serving hbase, format as hdfs://hbase-cluster:8020
+## Leave empty if hbase running on same cluster with hive and mapreduce
+##kylin.storage.hbase.cluster-fs=
+#
+## The cut size for hbase region, in GB.
+#kylin.storage.hbase.region-cut-gb=5
+#
+## The hfile size of GB, smaller hfile leading to the converting hfile MR has more reducers and be faster.
+## Set 0 to disable this optimization.
+#kylin.storage.hbase.hfile-size-gb=2
+#
+#kylin.storage.hbase.min-region-count=1
+#kylin.storage.hbase.max-region-count=500
+#
+## Optional information for the owner of kylin platform, it can be your team's email
+## Currently it will be attached to each kylin's htable attribute
+#kylin.storage.hbase.owner-tag=whoami@kylin.apache.org
+#
+#kylin.storage.hbase.coprocessor-mem-gb=3
+#
+## By default kylin can spill query's intermediate results to disks when it's consuming too much memory.
+## Set it to false if you want query to abort immediately in such condition.
+#kylin.storage.partition.aggr-spill-enabled=true
+#
+## The maximum number of bytes each coprocessor is allowed to scan.
+## To allow arbitrary large scan, you can set it to 0.
+#kylin.storage.partition.max-scan-bytes=3221225472
+#
+## The default coprocessor timeout is (hbase.rpc.timeout * 0.9) / 1000 seconds,
+## You can set it to a smaller value. 0 means use default.
+## kylin.storage.hbase.coprocessor-timeout-seconds=0
+#
+## clean real storage after delete operation
+## if you want to delete the real storage like htable of deleting segment, you can set it to true
+#kylin.storage.clean-after-delete-operation=false
+#
+#### JOB ###
+#
+## Max job retry on error, default 0: no retry
+#kylin.job.retry=0
+#
+## Max count of concurrent jobs running
+#kylin.job.max-concurrent-jobs=10
+#
+## The percentage of the sampling, default 100%
+#kylin.job.sampling-percentage=100
+#
+## If true, will send email notification on job complete
+##kylin.job.notification-enabled=true
+##kylin.job.notification-mail-enable-starttls=true
+##kylin.job.notification-mail-host=smtp.office365.com
+##kylin.job.notification-mail-port=587
+##kylin.job.notification-mail-username=kylin@example.com
+##kylin.job.notification-mail-password=mypassword
+##kylin.job.notification-mail-sender=kylin@example.com
+#kylin.job.scheduler.provider.100=org.apache.kylin.job.impl.curator.CuratorScheduler
+#kylin.job.scheduler.default=0
+#
+#### ENGINE ###
+#
+## Time interval to check hadoop job status
+#kylin.engine.mr.yarn-check-interval-seconds=10
+#
+#kylin.engine.mr.reduce-input-mb=500
+#
+#kylin.engine.mr.max-reducer-number=500
+#
+#kylin.engine.mr.mapper-input-rows=1000000
+#
+## Enable dictionary building in MR reducer
+#kylin.engine.mr.build-dict-in-reducer=true
+#
+## Number of reducers for fetching UHC column distinct values
+#kylin.engine.mr.uhc-reducer-count=3
+#
+## Whether using an additional step to build UHC dictionary
+#kylin.engine.mr.build-uhc-dict-in-additional-step=false
+#
+#
+#### CUBE | DICTIONARY ###
+#
+#kylin.cube.cuboid-scheduler=org.apache.kylin.cube.cuboid.DefaultCuboidScheduler
+#kylin.cube.segment-advisor=org.apache.kylin.cube.CubeSegmentAdvisor
+#
+## 'auto', 'inmem', 'layer' or 'random' for testing 
+#kylin.cube.algorithm=layer
+#
+## A smaller threshold prefers layer, a larger threshold prefers in-mem
+#kylin.cube.algorithm.layer-or-inmem-threshold=7
+#
+## auto use inmem algorithm:
+## 1, cube planner optimize job
+## 2, no source record
+#kylin.cube.algorithm.inmem-auto-optimize=true
+#
+#kylin.cube.aggrgroup.max-combination=32768
+#
+#kylin.snapshot.max-mb=300
+#
+#kylin.cube.cubeplanner.enabled=true
+#kylin.cube.cubeplanner.enabled-for-existing-cube=true
+#kylin.cube.cubeplanner.expansion-threshold=15.0
+#kylin.cube.cubeplanner.recommend-cache-max-size=200
+#kylin.cube.cubeplanner.mandatory-rollup-threshold=1000
+#kylin.cube.cubeplanner.algorithm-threshold-greedy=8
+#kylin.cube.cubeplanner.algorithm-threshold-genetic=23
+#
+#
+#### QUERY ###
+#
+## Controls the maximum number of bytes a query is allowed to scan storage.
+## The default value 0 means no limit.
+## The counterpart kylin.storage.partition.max-scan-bytes sets the maximum per coprocessor.
+#kylin.query.max-scan-bytes=0
+#
+#kylin.query.cache-enabled=true
+#
+## Controls extras properties for Calcite jdbc driver
+## all extras properties should be under the prefix "kylin.query.calcite.extras-props."
+## case sensitive, default: true, to enable case insensitive set it to false
+## @see org.apache.calcite.config.CalciteConnectionProperty.CASE_SENSITIVE
+#kylin.query.calcite.extras-props.caseSensitive=true
+## how to handle unquoted identifiers, default: TO_UPPER, available options: UNCHANGED, TO_UPPER, TO_LOWER
+## @see org.apache.calcite.config.CalciteConnectionProperty.UNQUOTED_CASING
+#kylin.query.calcite.extras-props.unquotedCasing=TO_UPPER
+## quoting method, default: DOUBLE_QUOTE, available options: DOUBLE_QUOTE, BACK_TICK, BRACKET
+## @see org.apache.calcite.config.CalciteConnectionProperty.QUOTING
+#kylin.query.calcite.extras-props.quoting=DOUBLE_QUOTE
+## change SqlConformance from DEFAULT to LENIENT to enable group by ordinal
+## @see org.apache.calcite.sql.validate.SqlConformance.SqlConformanceEnum
+#kylin.query.calcite.extras-props.conformance=LENIENT
+#
+## TABLE ACL
+#kylin.query.security.table-acl-enabled=true
+#
+## Usually should not modify this
+#kylin.query.interceptors=org.apache.kylin.rest.security.TableInterceptor
+#
+#kylin.query.escape-default-keyword=false
+#
+## Usually should not modify this
+#kylin.query.transformers=org.apache.kylin.query.util.DefaultQueryTransformer,org.apache.kylin.query.util.KeywordDefaultDirtyHack
+#
+#### SECURITY ###
+#
+## Spring security profile, options: testing, ldap, saml
+## with "testing" profile, user can use pre-defined name/pwd like KYLIN/ADMIN to login
+#kylin.security.profile=testing
+#
+## Admin roles in LDAP, for ldap and saml
+#kylin.security.acl.admin-role=admin
+#
+## LDAP authentication configuration
+#kylin.security.ldap.connection-server=ldap://ldap_server:389
+#kylin.security.ldap.connection-username=
+#kylin.security.ldap.connection-password=
+#
+## LDAP user account directory;
+#kylin.security.ldap.user-search-base=
+#kylin.security.ldap.user-search-pattern=
+#kylin.security.ldap.user-group-search-base=
+#kylin.security.ldap.user-group-search-filter=(|(member={0})(memberUid={1}))
+#
+## LDAP service account directory
+#kylin.security.ldap.service-search-base=
+#kylin.security.ldap.service-search-pattern=
+#kylin.security.ldap.service-group-search-base=
+#
+### SAML configurations for SSO
+## SAML IDP metadata file location
+#kylin.security.saml.metadata-file=classpath:sso_metadata.xml
+#kylin.security.saml.metadata-entity-base-url=https://hostname/kylin
+#kylin.security.saml.keystore-file=classpath:samlKeystore.jks
+#kylin.security.saml.context-scheme=https
+#kylin.security.saml.context-server-name=hostname
+#kylin.security.saml.context-server-port=443
+#kylin.security.saml.context-path=/kylin
+#
+#### SPARK ENGINE CONFIGS ###
+#
+## Hadoop conf folder, will export this as "HADOOP_CONF_DIR" to run spark-submit
+## This must contain site xmls of core, yarn, hive, and hbase in one folder
+##kylin.env.hadoop-conf-dir=/etc/hadoop/conf
+#
+## Estimate the RDD partition numbers
+#kylin.engine.spark.rdd-partition-cut-mb=10
+#
+## Minimal partition numbers of rdd
+#kylin.engine.spark.min-partition=1
+#
+## Max partition numbers of rdd
+#kylin.engine.spark.max-partition=5000
+#
+## Spark conf (default is in spark/conf/spark-defaults.conf)
+#kylin.engine.spark-conf.spark.master=yarn
+##kylin.engine.spark-conf.spark.submit.deployMode=cluster
+#kylin.engine.spark-conf.spark.yarn.queue=default
+#kylin.engine.spark-conf.spark.driver.memory=2G
+#kylin.engine.spark-conf.spark.executor.memory=4G
+#kylin.engine.spark-conf.spark.executor.instances=40
+#kylin.engine.spark-conf.spark.yarn.executor.memoryOverhead=1024
+#kylin.engine.spark-conf.spark.shuffle.service.enabled=true
+#kylin.engine.spark-conf.spark.eventLog.enabled=true
+#kylin.engine.spark-conf.spark.eventLog.dir=hdfs\:///kylin/spark-history
+#kylin.engine.spark-conf.spark.history.fs.logDirectory=hdfs\:///kylin/spark-history
+#kylin.engine.spark-conf.spark.hadoop.yarn.timeline-service.enabled=false
+#
+#### Spark conf for specific job
+#kylin.engine.spark-conf-mergedict.spark.executor.memory=6G
+#kylin.engine.spark-conf-mergedict.spark.memory.fraction=0.2
+#
+## manually upload spark-assembly jar to HDFS and then set this property will avoid repeatedly uploading jar at runtime
+##kylin.engine.spark-conf.spark.yarn.archive=hdfs://namenode:8020/kylin/spark/spark-libs.jar
+##kylin.engine.spark-conf.spark.io.compression.codec=org.apache.spark.io.SnappyCompressionCodec
+#
+## uncomment for HDP
+##kylin.engine.spark-conf.spark.driver.extraJavaOptions=-Dhdp.version=current
+##kylin.engine.spark-conf.spark.yarn.am.extraJavaOptions=-Dhdp.version=current
+##kylin.engine.spark-conf.spark.executor.extraJavaOptions=-Dhdp.version=current
+#
+#
+#### QUERY PUSH DOWN ###
+#
+##kylin.query.pushdown.runner-class-name=org.apache.kylin.query.adhoc.PushDownRunnerJdbcImpl
+#
+##kylin.query.pushdown.update-enabled=false
+##kylin.query.pushdown.jdbc.url=jdbc:hive2://sandbox:10000/default
+##kylin.query.pushdown.jdbc.driver=org.apache.hive.jdbc.HiveDriver
+##kylin.query.pushdown.jdbc.username=hive
+##kylin.query.pushdown.jdbc.password=
+#
+##kylin.query.pushdown.jdbc.pool-max-total=8
+##kylin.query.pushdown.jdbc.pool-max-idle=8
+##kylin.query.pushdown.jdbc.pool-min-idle=0
+#
+#### JDBC Data Source
+##kylin.source.jdbc.connection-url=
+##kylin.source.jdbc.driver=
+##kylin.source.jdbc.dialect=
+##kylin.source.jdbc.user=
+##kylin.source.jdbc.pass=
+##kylin.source.jdbc.sqoop-home=
+##kylin.source.jdbc.filed-delimiter=|
+#
+#### Livy with Kylin
+##kylin.engine.livy-conf.livy-enabled=false
+##kylin.engine.livy-conf.livy-url=http://LivyHost:8998
+##kylin.engine.livy-conf.livy-key.file=hdfs:///path-to-kylin-job-jar
+##kylin.engine.livy-conf.livy-arr.jars=hdfs:///path-to-hadoop-dependency-jar
+#
+#
+#### Realtime OLAP ###
+#
+## Where should local segment cache located, for absolute path, the real path will be ${KYLIN_HOME}/${kylin.stream.index.path}
+#kylin.stream.index.path=stream_index
+#
+## The timezone for Derived Time Column like hour_start, try set to GMT+N, please check detail at KYLIN-4010
+#kylin.stream.event.timezone=
+#
+## Debug switch for print realtime global dict encode information, please check detail at KYLIN-4141
+#kylin.stream.print-realtime-dict-enabled=false
+#
+## Should enable latest coordinator, please check detail at KYLIN-4167
+#kylin.stream.new.coordinator-enabled=true
+#
+## In which way should we collect receiver's metrics info
+##kylin.stream.metrics.option=console/csv/jmx
+#
+## When enabling a streaming cube, should consume from earliest offset or latest offset
+#kylin.stream.consume.offsets.latest=true
+#
+## The parallelism of scan in receiver side
+#kylin.stream.receiver.use-threads-per-query=8
+#
+## How coordinator/receiver register itself into StreamMetadata, there are three option:
+## 1. hostname:port, then kylin will set the config ip and port as the currentNode;
+## 2. port, then kylin will get the node's hostname and append port as the currentNode;
+## 3. not set, then kylin will get the node hostname address and set the hostname and defaultPort(7070 for coordinator or 9090 for receiver) as the currentNode.
+##kylin.stream.node=
+#
+## Auto resubmit after job be discarded
+#kylin.stream.auto-resubmit-after-discard-enabled=true
diff --git a/kubernetes/config/quickstart/kylin/kylin_hive_conf.xml b/kubernetes/config/quickstart/kylin/kylin_hive_conf.xml
new file mode 100644
index 0000000..f01d08e
--- /dev/null
+++ b/kubernetes/config/quickstart/kylin/kylin_hive_conf.xml
@@ -0,0 +1,102 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+    <property>
+        <name>dfs.replication</name>
+        <value>2</value>
+        <description>Block replication</description>
+    </property>
+
+    <property>
+        <name>hive.exec.compress.output</name>
+        <value>true</value>
+        <description>Enable compress</description>
+    </property>
+
+    <property>
+        <name>hive.auto.convert.join</name>
+        <value>true</value>
+        <description>Enables the optimization about converting common join into mapjoin</description>
+    </property>
+
+    <property>
+        <name>hive.auto.convert.join.noconditionaltask</name>
+        <value>true</value>
+        <description>enable map-side join</description>
+    </property>
+
+    <property>
+        <name>hive.auto.convert.join.noconditionaltask.size</name>
+        <value>100000000</value>
+        <description>enable map-side join</description>
+    </property>
+
+    <!--
+    The default map outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.map.output.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description></description>
+    </property>
+    -->
+    <!--
+    The default job outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description></description>
+    </property>
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.type</name>
+        <value>BLOCK</value>
+        <description>The compression type to use for job outputs</description>
+    </property>
+
+    -->
+    <property>
+        <name>mapreduce.job.split.metainfo.maxsize</name>
+        <value>-1</value>
+        <description>The maximum permissible size of the split metainfo file.
+            The JobTracker won't attempt to read split metainfo files bigger than
+            the configured value. No limits if set to -1.
+        </description>
+    </property>
+
+    <property>
+        <name>hive.stats.autogather</name>
+        <value>true</value>
+        <description>Collect statistics for newly created intermediate table</description>
+    </property>
+
+    <property>
+        <name>hive.merge.mapfiles</name>
+        <value>false</value>
+        <description>Disable Hive's auto merge</description>
+    </property>
+
+    <property>
+        <name>hive.merge.mapredfiles</name>
+        <value>false</value>
+        <description>Disable Hive's auto merge</description>
+    </property>
+</configuration>
diff --git a/kubernetes/config/quickstart/kylin/kylin_job_conf.xml b/kubernetes/config/quickstart/kylin/kylin_job_conf.xml
new file mode 100644
index 0000000..17a9145
--- /dev/null
+++ b/kubernetes/config/quickstart/kylin/kylin_job_conf.xml
@@ -0,0 +1,88 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+
+    <property>
+        <name>mapreduce.job.split.metainfo.maxsize</name>
+        <value>-1</value>
+        <description>The maximum permissible size of the split metainfo file.
+            The JobTracker won't attempt to read split metainfo files bigger than
+            the configured value. No limits if set to -1.
+        </description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.output.compress</name>
+        <value>true</value>
+        <description>Compress map outputs</description>
+    </property>
+
+    <!--
+    The default map outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.map.output.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for map outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress</name>
+        <value>true</value>
+        <description>Compress the output of a MapReduce job</description>
+    </property>
+    <!--
+    The default job outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for job outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.type</name>
+        <value>BLOCK</value>
+        <description>The compression type to use for job outputs</description>
+    </property>
+
+
+    <property>
+        <name>mapreduce.job.max.split.locations</name>
+        <value>2000</value>
+        <description>No description</description>
+    </property>
+
+    <property>
+        <name>dfs.replication</name>
+        <value>2</value>
+        <description>Block replication</description>
+    </property>
+
+    <property>
+        <name>mapreduce.task.timeout</name>
+        <value>3600000</value>
+        <description>Set task timeout to 1 hour</description>
+    </property>
+
+</configuration>
diff --git a/kubernetes/config/quickstart/kylin/kylin_job_conf_cube_merge.xml b/kubernetes/config/quickstart/kylin/kylin_job_conf_cube_merge.xml
new file mode 100644
index 0000000..79365ad
--- /dev/null
+++ b/kubernetes/config/quickstart/kylin/kylin_job_conf_cube_merge.xml
@@ -0,0 +1,104 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+    <property>
+        <name>mapreduce.job.split.metainfo.maxsize</name>
+        <value>-1</value>
+        <description>The maximum permissible size of the split metainfo file.
+            The JobTracker won't attempt to read split metainfo files bigger than
+            the configured value. No limits if set to -1.
+        </description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.output.compress</name>
+        <value>true</value>
+        <description>Compress map outputs</description>
+    </property>
+
+    <!--
+    The default map outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.map.output.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for map outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress</name>
+        <value>true</value>
+        <description>Compress the output of a MapReduce job</description>
+    </property>
+    <!--
+    The default job outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for job outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.type</name>
+        <value>BLOCK</value>
+        <description>The compression type to use for job outputs</description>
+    </property>
+
+    <property>
+        <name>mapreduce.job.max.split.locations</name>
+        <value>2000</value>
+        <description>No description</description>
+    </property>
+
+    <property>
+        <name>dfs.replication</name>
+        <value>2</value>
+        <description>Block replication</description>
+    </property>
+
+    <property>
+        <name>mapreduce.task.timeout</name>
+        <value>7200000</value>
+        <description>Set task timeout to 2 hours</description>
+    </property>
+
+    <!--Additional config for cube merge job, giving more memory -->
+    <property>
+        <name>mapreduce.map.memory.mb</name>
+        <value>3072</value>
+        <description></description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.java.opts</name>
+        <value>-Xmx2700m -XX:OnOutOfMemoryError='kill -9 %p'</value>
+        <description></description>
+    </property>
+
+    <property>
+        <name>mapreduce.task.io.sort.mb</name>
+        <value>200</value>
+        <description></description>
+    </property>
+</configuration>
diff --git a/kubernetes/config/quickstart/kylin/kylin_job_conf_inmem.xml b/kubernetes/config/quickstart/kylin/kylin_job_conf_inmem.xml
new file mode 100644
index 0000000..ddda4dd
--- /dev/null
+++ b/kubernetes/config/quickstart/kylin/kylin_job_conf_inmem.xml
@@ -0,0 +1,111 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+    <property>
+        <name>mapreduce.job.is-mem-hungry</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>mapreduce.job.split.metainfo.maxsize</name>
+        <value>-1</value>
+        <description>The maximum permissible size of the split metainfo file.
+            The JobTracker won't attempt to read split metainfo files bigger than
+            the configured value. No limits if set to -1.
+        </description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.output.compress</name>
+        <value>true</value>
+        <description>Compress map outputs</description>
+    </property>
+
+    <!--
+    The default map outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.map.output.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for map outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress</name>
+        <value>true</value>
+        <description>Compress the output of a MapReduce job</description>
+    </property>
+    <!--
+    The default job outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for job outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.type</name>
+        <value>BLOCK</value>
+        <description>The compression type to use for job outputs</description>
+    </property>
+
+
+    <property>
+        <name>mapreduce.job.max.split.locations</name>
+        <value>2000</value>
+        <description>No description</description>
+    </property>
+
+    <property>
+        <name>dfs.replication</name>
+        <value>2</value>
+        <description>Block replication</description>
+    </property>
+
+    <property>
+        <name>mapreduce.task.timeout</name>
+        <value>7200000</value>
+        <description>Set task timeout to 2 hours</description>
+    </property>
+
+    <!--Additional config for in-mem cubing, giving mapper more memory -->
+    <property>
+        <name>mapreduce.map.memory.mb</name>
+        <value>3072</value>
+        <description></description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.java.opts</name>
+        <value>-Xmx2700m -XX:OnOutOfMemoryError='kill -9 %p'</value>
+        <description></description>
+    </property>
+
+    <property>
+        <name>mapreduce.task.io.sort.mb</name>
+        <value>200</value>
+        <description></description>
+    </property>
+
+</configuration>
diff --git a/kubernetes/config/quickstart/kylin/setenv-tool.sh b/kubernetes/config/quickstart/kylin/setenv-tool.sh
new file mode 100644
index 0000000..487b5ef
--- /dev/null
+++ b/kubernetes/config/quickstart/kylin/setenv-tool.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# source me
+
+# (if you're deploying KYLIN on a powerful server and want to replace the default conservative settings)
+# uncomment the following for it to take effect
+export KYLIN_JVM_SETTINGS="-Xms1024M -Xmx4096M -Xss1024K -XX:MaxPermSize=512M -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.$$ -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=64M"
+
+# Newer versions of glibc use an arena memory allocator that causes virtual
+# memory usage to explode. Tune the variable down to prevent vmem explosion.
+# See HADOOP-7154.
+export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
+
+# export KYLIN_JVM_SETTINGS="-Xms16g -Xmx16g -XX:MaxPermSize=512m -XX:NewSize=3g -XX:MaxNewSize=3g -XX:SurvivorRatio=4 -XX:+CMSClassUnloadingEnabled -XX:+CMSParallelRemarkEnabled -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:+DisableExplicitGC -XX:+HeapDumpOnOutOfMemoryError -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.$$ -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFi [...]
+
+# uncomment the following for it to take effect (the values need adjusting to fit your env)
+# export KYLIN_DEBUG_SETTINGS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
+
+# when running on HDP, try to determine the software stack version and set hdp.version JVM property
+if [[ -d "/usr/hdp/current/hadoop-client" ]]
+then
+   export KYLIN_EXTRA_START_OPTS="-Dhdp.version=`ls -l /usr/hdp/current/hadoop-client | awk -F'/' '{print $8}'`"
+   # attempt to locate JVM native libraries and set corresponding property
+   if [[ -d "/usr/hdp/current/hadoop-client/lib/native" ]]
+   then
+      export KYLIN_LD_LIBRARY_SETTINGS="-Djava.library.path=/usr/hdp/current/hadoop-client/lib/native"
+   fi
+else
+   export KYLIN_EXTRA_START_OPTS=""
+   # uncomment the following line to set JVM native library path, the values need to reflect your environment and hardware architecture
+   # export KYLIN_LD_LIBRARY_SETTINGS="-Djava.library.path=/apache/hadoop/lib/native/Linux-amd64-64"
+fi
+
+if [ ! -z "${KYLIN_JVM_SETTINGS}" ]
+then
+    verbose "KYLIN_JVM_SETTINGS is ${KYLIN_JVM_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_JVM_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_JVM_SETTINGS is not set, using default jvm settings: ${KYLIN_JVM_SETTINGS}"
+fi
+
+if [ ! -z "${KYLIN_DEBUG_SETTINGS}" ]
+then
+    verbose "KYLIN_DEBUG_SETTINGS is ${KYLIN_DEBUG_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_DEBUG_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_DEBUG_SETTINGS is not set, will not enable remote debugging"
+fi
+
+if [ ! -z "${KYLIN_LD_LIBRARY_SETTINGS}" ]
+then
+    verbose "KYLIN_LD_LIBRARY_SETTINGS is ${KYLIN_LD_LIBRARY_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_LD_LIBRARY_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_LD_LIBRARY_SETTINGS is not set, it is okay unless you want to specify your own native path"
+fi
diff --git a/kubernetes/config/quickstart/kylin/setenv.sh b/kubernetes/config/quickstart/kylin/setenv.sh
new file mode 100644
index 0000000..fa88769
--- /dev/null
+++ b/kubernetes/config/quickstart/kylin/setenv.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# source me
+
+# (if you're deploying KYLIN on a powerful server and want to replace the default conservative settings)
+# uncomment the following for it to take effect
+export KYLIN_JVM_SETTINGS="-Xms1024M -Xmx2048M -Xss1024K -XX:MaxPermSize=512M -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.%p -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=64M"
+
+# Newer versions of glibc use an arena memory allocator that causes virtual
+# memory usage to explode. Tune the variable down to prevent vmem explosion.
+# See HADOOP-7154.
+export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
+
+# export KYLIN_JVM_SETTINGS="-Xms16g -Xmx16g -XX:MaxPermSize=512m -XX:NewSize=3g -XX:MaxNewSize=3g -XX:SurvivorRatio=4 -XX:+CMSClassUnloadingEnabled -XX:+CMSParallelRemarkEnabled -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:+DisableExplicitGC -XX:+HeapDumpOnOutOfMemoryError -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.%p -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFi [...]
+
+# uncomment the following for it to take effect (the values need adjusting to fit your env)
+# export KYLIN_DEBUG_SETTINGS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
+
+# when running on HDP, try to determine the software stack version and set hdp.version JVM property
+if [[ -d "/usr/hdp/current/hadoop-client" ]]
+then
+   export KYLIN_EXTRA_START_OPTS="-Dhdp.version=`ls -l /usr/hdp/current/hadoop-client | awk -F'/' '{print $8}'`"
+   # attempt to locate JVM native libraries and set corresponding property
+   if [[ -d "/usr/hdp/current/hadoop-client/lib/native" ]]
+   then
+      export KYLIN_LD_LIBRARY_SETTINGS="-Djava.library.path=/usr/hdp/current/hadoop-client/lib/native"
+   fi
+else
+   export KYLIN_EXTRA_START_OPTS=""
+   # uncomment the following line to set JVM native library path, the values need to reflect your environment and hardware architecture
+   # export KYLIN_LD_LIBRARY_SETTINGS="-Djava.library.path=/apache/hadoop/lib/native/Linux-amd64-64"
+fi
+
+if [ ! -z "${KYLIN_JVM_SETTINGS}" ]
+then
+    verbose "KYLIN_JVM_SETTINGS is ${KYLIN_JVM_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_JVM_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_JVM_SETTINGS is not set, using default jvm settings: ${KYLIN_JVM_SETTINGS}"
+fi
+
+if [ ! -z "${KYLIN_DEBUG_SETTINGS}" ]
+then
+    verbose "KYLIN_DEBUG_SETTINGS is ${KYLIN_DEBUG_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_DEBUG_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_DEBUG_SETTINGS is not set, will not enable remote debugging"
+fi
+
+if [ ! -z "${KYLIN_LD_LIBRARY_SETTINGS}" ]
+then
+    verbose "KYLIN_LD_LIBRARY_SETTINGS is ${KYLIN_LD_LIBRARY_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_LD_LIBRARY_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_LD_LIBRARY_SETTINGS is not set, it is okay unless you want to specify your own native path"
+fi
diff --git a/kubernetes/docker/README b/kubernetes/docker/README
new file mode 100644
index 0000000..58528cc
--- /dev/null
+++ b/kubernetes/docker/README
@@ -0,0 +1 @@
+Please check README under `hadoop-client` and `kylin-client` for detail.
\ No newline at end of file
diff --git a/kubernetes/docker/hadoop-client/CDH57/Dockerfile b/kubernetes/docker/hadoop-client/CDH57/Dockerfile
new file mode 100644
index 0000000..1f5d203
--- /dev/null
+++ b/kubernetes/docker/hadoop-client/CDH57/Dockerfile
@@ -0,0 +1,49 @@
+FROM centos:7.3.1611
+
+MAINTAINER Apache Kylin
+
+ENV JAVA_VERSION    1.8.0
+ENV SPARK_VERSION   2.3.2
+
+ENV JAVA_HOME       /usr/lib/jvm/java-${JAVA_VERSION}
+ENV HADOOP_HOME     /usr/lib/hadoop
+ENV HIVE_HOME       /usr/lib/hive
+ENV HCAT_HOME       /usr/lib/hive-hcatalog
+ENV HBASE_HOME      /usr/lib/hbase
+ENV SPARK_HOME      /opt/spark-${SPARK_VERSION}-bin-hadoop2.7
+
+ENV PATH $PATH:$SPARK_HOME/bin:
+
+ENV HADOOP_CONF_DIR  /etc/hadoop/conf
+ENV HIVE_CONF_DIR    /etc/hive/conf
+ENV HBASE_CONF_DIR   /etc/hbase/conf
+ENV HIVE_CONF        ${HIVE_CONF_DIR}
+ENV HIVE_LIB         ${HIVE_HOME}/lib
+
+RUN echo $'[cloudera-cdh5] \n\
+# Packages for Cloudera\'s Distribution for Hadoop, Version 5, on RedHat or CentOS 6 x86_64 \n\
+name=Cloudera\'s Distribution for Hadoop, Version 5 \n\
+baseurl=https://archive.cloudera.com/cdh5/redhat/6/x86_64/cdh/5.7.6/ \n\
+gpgkey =https://archive.cloudera.com/cdh5/redhat/6/x86_64/cdh/RPM-GPG-KEY-cloudera \n\
+gpgcheck = 1' > /etc/yum.repos.d/cloudera-cdh5.repo
+
+WORKDIR /opt
+
+RUN set -xeu && yum -y  install java-1.8.0-openjdk-devel
+RUN set -xeu && yum -y  install krb5-workstation
+RUN set -xeu && yum -y  install hadoop-client
+RUN set -xeu && yum -y  install hive hive-hcatalog
+RUN set -xeu && yum -y  install hbase
+
+RUN set -xeu && \
+    curl -o ${HIVE_HOME}/lib/hadoop-lzo-0.4.15.jar \
+    https://clojars.org/repo/hadoop-lzo/hadoop-lzo/0.4.15/hadoop-lzo-0.4.15.jar && \
+    curl -o ${HIVE_HOME}/lib/mysql-connector-java-5.1.24.jar \
+    https://repo1.maven.org/maven2/mysql/mysql-connector-java/5.1.24/mysql-connector-java-5.1.24.jar
+
+RUN set -xeu && \
+    yum -q clean all && \
+    rm -rf /var/cache/yum && \
+    rm -rf /tmp/* /var/tmp/*
+
+ADD provided-binary/spark-${SPARK_VERSION}-bin-hadoop2.7.tgz .
\ No newline at end of file
diff --git a/kubernetes/docker/hadoop-client/CDH57/build-image.sh b/kubernetes/docker/hadoop-client/CDH57/build-image.sh
new file mode 100644
index 0000000..5b51657
--- /dev/null
+++ b/kubernetes/docker/hadoop-client/CDH57/build-image.sh
@@ -0,0 +1 @@
+docker build -t hadoop-client:cdh57 .
\ No newline at end of file
diff --git a/kubernetes/docker/hadoop-client/README.MD b/kubernetes/docker/hadoop-client/README.MD
new file mode 100644
index 0000000..e977d4a
--- /dev/null
+++ b/kubernetes/docker/hadoop-client/README.MD
@@ -0,0 +1,9 @@
+## Background
+What is hadoop-client docker images? 
+And why we need this?
+
+## Build Step
+1. Place Spark binary(*spark-2.3.2-bin-hadoop2.7.tgz*) into dir `provided-binary`.
+2. Depending on which Hadoop distribution you are using, prepare a `Dockerfile`; please take `CDH-5.7` as an example
+3. Run `build-image.sh` to build image.
+
diff --git a/k8s/images/hadoop-client/Dockerfile b/kubernetes/docker/hadoop-client/apache-hadoop2.7/Dockerfile
similarity index 100%
rename from k8s/images/hadoop-client/Dockerfile
rename to kubernetes/docker/hadoop-client/apache-hadoop2.7/Dockerfile
diff --git a/kubernetes/docker/hadoop-client/apache-hadoop2.7/build-image.sh b/kubernetes/docker/hadoop-client/apache-hadoop2.7/build-image.sh
new file mode 100644
index 0000000..3f8ff8d
--- /dev/null
+++ b/kubernetes/docker/hadoop-client/apache-hadoop2.7/build-image.sh
@@ -0,0 +1 @@
+docker build -t hadoop-client:apache-hadoop2.7 .
\ No newline at end of file
diff --git a/k8s/images/kylin/Dockerfile b/kubernetes/docker/kylin-client/Dockerfile
similarity index 80%
rename from k8s/images/kylin/Dockerfile
rename to kubernetes/docker/kylin-client/Dockerfile
index 03bb76a..6ab5d2a 100644
--- a/k8s/images/kylin/Dockerfile
+++ b/kubernetes/docker/kylin-client/Dockerfile
@@ -1,6 +1,15 @@
-ARG HADOOP_CLIENT_VERSION=3.0.0
+FROM hadoop-client:cdh57
+ARG USER=apache_kylin
+ARG USER_HOME=/home/${USER}
+ARG KYLIN_VERSION=apache-kylin-3.0.1-bin-cdh57
+
+ARG HADOOP_CONF_HOME=/etc/hadoop/conf
+ARG HIVE_CONF_HOME=/etc/hive/conf
+ARG HBASE_CONF_HOME=/etc/hbase/conf
+ARG SPARK_CONF_HOME=$KYLIN_HOME/hadoop-conf
 
-FROM apachekylin/hadoop-client:$HADOOP_CLIENT_VERSION
+ENV SPARK_HOME /opt/spark-2.3.2-bin-hadoop2.7
+ENV KYLIN_HOME=$USER_HOME/kylin
 
 MAINTAINER Apache Kylin
 
@@ -20,28 +29,16 @@ RUN set -x \
        bind-utils \
     && yum clean all
 
-ARG KYLIN_VERSION=3.0.0
-ARG USER=apache_kylin
-
-ARG USER_HOME=/home/$USER
 RUN set -x \
     && groupadd -r $USER \
     && useradd -r -m -g $USER $USER -d $USER_HOME \
     && echo '$USER ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
 
-ENV KYLIN_HOME=$USER_HOME/kylin
-
-
-COPY --chown=$USER:$USER apache-kylin-${KYLIN_VERSION}-bin $KYLIN_HOME
-
-ARG HADOOP_CONF_HOME=/apache/hadoop/etc/hadoop
-ARG HIVE_CONF_HOME=/apache/hive/conf
-ARG HBASE_CONF_HOME=/apache/hbase/conf
-ARG SPARK_CONF_HOME=$KYLIN_HOME/hadoop-conf
+COPY --chown=$USER:$USER $KYLIN_VERSION $KYLIN_HOME
 
 RUN set -x \
     && unzip -qq $KYLIN_HOME/tomcat/webapps/kylin.war -d $KYLIN_HOME/tomcat/webapps/kylin \
-    && chown -R $USER:$USER $KYLIN_HOME/tomcat/webapps/kylin \
+#    && chown -R $USER:$USER $KYLIN_HOME/tomcat/webapps/kylin \
     && rm $KYLIN_HOME/tomcat/webapps/kylin.war \
     && mkdir $SPARK_CONF_HOME \
     && ln -s $HADOOP_CONF_HOME/core-site.xml $SPARK_CONF_HOME/core-site.xml \
@@ -63,6 +60,5 @@ RUN /usr/bin/crontab -u $USER /tmp/crontab.txt \
     && chmod 755 $TOOL_HOME/*
 EXPOSE 7070
 
-USER $USER
-
+USER root
 CMD ["sh", "-c", "$TOOL_HOME/bootstrap.sh server -d"]
diff --git a/kubernetes/docker/kylin-client/README b/kubernetes/docker/kylin-client/README
new file mode 100644
index 0000000..bfc5a84
--- /dev/null
+++ b/kubernetes/docker/kylin-client/README
@@ -0,0 +1,8 @@
+## Background
+What is kylin-client docker images?
+And why we need this?
+
+## Build Step
+1. Place the Kylin binary (e.g. *apache-kylin-3.0.1-bin-cdh57.tar.gz*) and uncompress it into the current dir.
+2. Modify `Dockerfile` , change the value of `KYLIN_VERSION` and name of base image(hadoop-client).
+3. Run `build-image.sh` to build image.
\ No newline at end of file
diff --git a/k8s/images/kylin/bin/bootstrap.sh b/kubernetes/docker/kylin-client/bin/bootstrap.sh
similarity index 76%
rename from k8s/images/kylin/bin/bootstrap.sh
rename to kubernetes/docker/kylin-client/bin/bootstrap.sh
index bbce0b9..911f845 100755
--- a/k8s/images/kylin/bin/bootstrap.sh
+++ b/kubernetes/docker/kylin-client/bin/bootstrap.sh
@@ -1,8 +1,5 @@
 #!/bin/bash
 
-# enable kite
-/home/b_kylin/kite2/b_kylin/bin/run.sh
-
 # enable cron job
 sudo crond -i -p
 
@@ -16,5 +13,5 @@ elif [[ $1 == "streaming" ]]; then
 fi
 
 if [[ $2 == "-d" ]]; then
-  while true; do sleep 1000; done
+  while true; do sleep 3000; done
 fi
diff --git a/kubernetes/docker/kylin-client/bin/check-liveness.sh b/kubernetes/docker/kylin-client/bin/check-liveness.sh
new file mode 100644
index 0000000..cc1f786
--- /dev/null
+++ b/kubernetes/docker/kylin-client/bin/check-liveness.sh
@@ -0,0 +1 @@
+#!/bin/bash
\ No newline at end of file
diff --git a/k8s/images/kylin/bin/check-readiness.sh b/kubernetes/docker/kylin-client/bin/check-readiness.sh
similarity index 100%
rename from k8s/images/kylin/bin/check-readiness.sh
rename to kubernetes/docker/kylin-client/bin/check-readiness.sh
diff --git a/k8s/images/kylin/bin/clean-log.sh b/kubernetes/docker/kylin-client/bin/clean-log.sh
similarity index 100%
rename from k8s/images/kylin/bin/clean-log.sh
rename to kubernetes/docker/kylin-client/bin/clean-log.sh
diff --git a/kubernetes/docker/kylin-client/build-image.sh b/kubernetes/docker/kylin-client/build-image.sh
new file mode 100644
index 0000000..daa9743
--- /dev/null
+++ b/kubernetes/docker/kylin-client/build-image.sh
@@ -0,0 +1,2 @@
+docker build -t kylin-client:3.0.1-cdh57 .
+docker save -o kylin-client-3.0.1-cdh57.tar kylin-client:3.0.1-cdh57
\ No newline at end of file
diff --git a/k8s/images/kylin/crontab.txt b/kubernetes/docker/kylin-client/crontab.txt
similarity index 100%
rename from k8s/images/kylin/crontab.txt
rename to kubernetes/docker/kylin-client/crontab.txt
diff --git a/kubernetes/docker/upload.sh b/kubernetes/docker/upload.sh
new file mode 100644
index 0000000..9c2c57d
--- /dev/null
+++ b/kubernetes/docker/upload.sh
@@ -0,0 +1,2 @@
+docker save -o kylin-cdh.tar kylin-client:3.0.1-cdh57
+scp kylin-cdh.tar ubuntu@10.1.2.41:/home/ubuntu/
\ No newline at end of file
diff --git a/kubernetes/example/README b/kubernetes/example/README
new file mode 100644
index 0000000..44416d1
--- /dev/null
+++ b/kubernetes/example/README
@@ -0,0 +1,10 @@
+## Example
+
+> This dir provides a deployment template with all required files.
+
+### Pre-requirement
+
+- A healthy CDH 5.7 cluster
+- A healthy on-premise K8s cluster
+
+### Step
\ No newline at end of file
diff --git a/kubernetes/example/config/filebeat/filebeat.yml b/kubernetes/example/config/filebeat/filebeat.yml
index c78c7e6..6916da1 100644
--- a/kubernetes/example/config/filebeat/filebeat.yml
+++ b/kubernetes/example/config/filebeat/filebeat.yml
@@ -18,7 +18,7 @@ filebeat.prospectors:
 - type: log
   enabled: true
   paths:
-    - /var/log/kylin/kylin.log
+    - /home/apache_kylin/kylin/logs/kylin.log
   multiline.pattern: '^\d{4}-([1][0-2]|[0][0-9])-([3][0-1]|[1-2][0-9]|[0][1-9]|[1-9]) ([2][0-3]|[0-1][0-9]|[1-9]):[0-5][0-9]:([0-5][0-9]|[6][0])'
   multiline.negate: true
   multiline.match: after
@@ -30,7 +30,7 @@ filebeat.prospectors:
 - type: log
   enabled: true
   paths:
-    - /var/log/kylin/kylin.gc.*.current
+    - /home/apache_kylin/kylin/logs/kylin.gc.*.current
   multiline.pattern: '^\d{4}-(?:0?[1-9]|1[0-2])-(?:(?:0[1-9])|(?:[12][0-9])|(?:3[01])|[1-9])T(?:2[0123]|[01]?[0-9]):(?:[0-5][0-9]):(?:(?:[0-5]?[0-9]|60)(?:[:.,][0-9]+)?)(?:Z|[+-](?:2[0123]|[01]?[0-9])(?::?(?:[0-5][0-9])))'
   multiline.negate: true
   multiline.match: after
@@ -42,7 +42,7 @@ filebeat.prospectors:
 - type: log
   enabled: true
   paths:
-    - /var/log/kylin/kylin.out
+    - /home/apache_kylin/kylin/logs/kylin.out
   multiline.pattern: '^\b(?:Jan?|Feb?|Mar?|Apr?|May?|Jun?|Jul?|Aug?|Sep?|Oct?|Nov?|Dec?)\b (?:(?:0[1-9])|(?:[12][0-9])|(?:3[01])|[1-9]), \d{4} (?:2[0123]|[01]?[0-9]):(?:[0-5][0-9]):(?:(?:[0-5]?[0-9]|60)(?:[:.,][0-9]+)?) (?:AM|PM)'
   multiline.negate: true
   multiline.match: after
@@ -53,7 +53,7 @@ filebeat.prospectors:
 # access log #
 - type: log
   paths:
-    - /var/log/tomcat/localhost_access_log.txt
+    - /home/apache_kylin/kylin/tomcat/logs/localhost_access_log.txt
   fields:
     name: "access"
     pipeline: "access"
@@ -61,7 +61,7 @@ filebeat.prospectors:
 # catalina log #
 - type: log
   paths:
-    - /var/log/tomcat/catalina.*.log
+    - /home/apache_kylin/kylin/tomcat/logs/catalina.*.log
   multiline.pattern: '^\b(?:Jan?|Feb?|Mar?|Apr?|May?|Jun?|Jul?|Aug?|Sep?|Oct?|Nov?|Dec?)\b (?:(?:0[1-9])|(?:[12][0-9])|(?:3[01])|[1-9]), \d{4} (?:2[0123]|[01]?[0-9]):(?:[0-5][0-9]):(?:(?:[0-5]?[0-9]|60)(?:[:.,][0-9]+)?) (?:AM|PM)'
   multiline.negate: true
   multiline.match: after
@@ -72,7 +72,7 @@ filebeat.prospectors:
 # localhost log #
 - type: log
   paths:
-    - /var/log/tomcat/localhost.*.log
+    - /home/apache_kylin/kylin/tomcat/logs/localhost.*.log
   multiline.pattern: '^\b(?:Jan?|Feb?|Mar?|Apr?|May?|Jun?|Jul?|Aug?|Sep?|Oct?|Nov?|Dec?)\b (?:(?:0[1-9])|(?:[12][0-9])|(?:3[01])|[1-9]), \d{4} (?:2[0123]|[01]?[0-9]):(?:[0-5][0-9]):(?:(?:[0-5]?[0-9]|60)(?:[:.,][0-9]+)?) (?:AM|PM)'
   multiline.negate: true
   multiline.match: after
@@ -103,8 +103,6 @@ setup.kibana:
 #-------------------------- Elasticsearch output ------------------------------
 output.elasticsearch:
   # Array of hosts to connect to.
-  hosts: []
-  protocol: "https"
-  username: ""
-  password: ""
+  hosts: ["cdh-master:9200"]
+  protocol: "http"
   index: "kylin-%{[fields.name]}-%{+yyyy-MM-dd}"
\ No newline at end of file
diff --git a/kubernetes/example/config/hadoop/core-site.xml b/kubernetes/example/config/hadoop/core-site.xml
new file mode 100644
index 0000000..d5622cb
--- /dev/null
+++ b/kubernetes/example/config/hadoop/core-site.xml
@@ -0,0 +1,133 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--Autogenerated by Cloudera Manager-->
+<configuration>
+  <property>
+    <name>fs.defaultFS</name>
+    <value>hdfs://cdh-master:8020</value>
+  </property>
+  <property>
+    <name>fs.trash.interval</name>
+    <value>1</value>
+  </property>
+  <property>
+    <name>io.compression.codecs</name>
+    <value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec,org.apache.hadoop.io.compress.DeflateCodec,org.apache.hadoop.io.compress.SnappyCodec,org.apache.hadoop.io.compress.Lz4Codec</value>
+  </property>
+  <property>
+    <name>hadoop.security.authentication</name>
+    <value>simple</value>
+  </property>
+  <property>
+    <name>hadoop.security.authorization</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>hadoop.rpc.protection</name>
+    <value>authentication</value>
+  </property>
+  <property>
+    <name>hadoop.ssl.require.client.cert</name>
+    <value>false</value>
+    <final>true</final>
+  </property>
+  <property>
+    <name>hadoop.ssl.keystores.factory.class</name>
+    <value>org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory</value>
+    <final>true</final>
+  </property>
+  <property>
+    <name>hadoop.ssl.server.conf</name>
+    <value>ssl-server.xml</value>
+    <final>true</final>
+  </property>
+  <property>
+    <name>hadoop.ssl.client.conf</name>
+    <value>ssl-client.xml</value>
+    <final>true</final>
+  </property>
+  <property>
+    <name>hadoop.security.auth_to_local</name>
+    <value>DEFAULT</value>
+  </property>
+  <property>
+    <name>hadoop.proxyuser.oozie.hosts</name>
+    <value>*</value>
+  </property>
+  <property>
+    <name>hadoop.proxyuser.oozie.groups</name>
+    <value>*</value>
+  </property>
+  <property>
+    <name>hadoop.proxyuser.mapred.hosts</name>
+    <value>*</value>
+  </property>
+  <property>
+    <name>hadoop.proxyuser.mapred.groups</name>
+    <value>*</value>
+  </property>
+  <property>
+    <name>hadoop.proxyuser.flume.hosts</name>
+    <value>*</value>
+  </property>
+  <property>
+    <name>hadoop.proxyuser.flume.groups</name>
+    <value>*</value>
+  </property>
+  <property>
+    <name>hadoop.proxyuser.HTTP.hosts</name>
+    <value>*</value>
+  </property>
+  <property>
+    <name>hadoop.proxyuser.HTTP.groups</name>
+    <value>*</value>
+  </property>
+  <property>
+    <name>hadoop.proxyuser.hive.hosts</name>
+    <value>*</value>
+  </property>
+  <property>
+    <name>hadoop.proxyuser.hive.groups</name>
+    <value>*</value>
+  </property>
+  <property>
+    <name>hadoop.proxyuser.hue.hosts</name>
+    <value>*</value>
+  </property>
+  <property>
+    <name>hadoop.proxyuser.hue.groups</name>
+    <value>*</value>
+  </property>
+  <property>
+    <name>hadoop.proxyuser.httpfs.hosts</name>
+    <value>*</value>
+  </property>
+  <property>
+    <name>hadoop.proxyuser.httpfs.groups</name>
+    <value>*</value>
+  </property>
+  <property>
+    <name>hadoop.proxyuser.hdfs.groups</name>
+    <value>*</value>
+  </property>
+  <property>
+    <name>hadoop.proxyuser.hdfs.hosts</name>
+    <value>*</value>
+  </property>
+  <property>
+    <name>hadoop.proxyuser.yarn.hosts</name>
+    <value>*</value>
+  </property>
+  <property>
+    <name>hadoop.proxyuser.yarn.groups</name>
+    <value>*</value>
+  </property>
+  <property>
+    <name>hadoop.security.group.mapping</name>
+    <value>org.apache.hadoop.security.ShellBasedUnixGroupsMapping</value>
+  </property>
+  <property>
+    <name>hadoop.security.instrumentation.requires.admin</name>
+    <value>false</value>
+  </property>
+</configuration>
diff --git a/kubernetes/example/config/hadoop/hbase-site.xml b/kubernetes/example/config/hadoop/hbase-site.xml
new file mode 100644
index 0000000..e9b7069
--- /dev/null
+++ b/kubernetes/example/config/hadoop/hbase-site.xml
@@ -0,0 +1,109 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--Autogenerated by Cloudera Manager-->
+<configuration>
+  <property>
+    <name>hbase.rootdir</name>
+    <value>hdfs://cdh-master:8020/hbase</value>
+  </property>
+  <property>
+    <name>hbase.client.write.buffer</name>
+    <value>2097152</value>
+  </property>
+  <property>
+    <name>hbase.client.pause</name>
+    <value>100</value>
+  </property>
+  <property>
+    <name>hbase.client.retries.number</name>
+    <value>35</value>
+  </property>
+  <property>
+    <name>hbase.client.scanner.caching</name>
+    <value>100</value>
+  </property>
+  <property>
+    <name>hbase.client.keyvalue.maxsize</name>
+    <value>10485760</value>
+  </property>
+  <property>
+    <name>hbase.ipc.client.allowsInterrupt</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>hbase.client.primaryCallTimeout.get</name>
+    <value>10</value>
+  </property>
+  <property>
+    <name>hbase.client.primaryCallTimeout.multiget</name>
+    <value>10</value>
+  </property>
+  <property>
+    <name>hbase.client.scanner.timeout.period</name>
+    <value>60000</value>
+  </property>
+  <property>
+    <name>hbase.coprocessor.region.classes</name>
+    <value>org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint</value>
+  </property>
+  <property>
+    <name>hbase.regionserver.thrift.http</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>hbase.thrift.support.proxyuser</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>hbase.rpc.timeout</name>
+    <value>60000</value>
+  </property>
+  <property>
+    <name>hbase.snapshot.enabled</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>hbase.snapshot.master.timeoutMillis</name>
+    <value>60000</value>
+  </property>
+  <property>
+    <name>hbase.snapshot.region.timeout</name>
+    <value>60000</value>
+  </property>
+  <property>
+    <name>hbase.snapshot.master.timeout.millis</name>
+    <value>60000</value>
+  </property>
+  <property>
+    <name>hbase.security.authentication</name>
+    <value>simple</value>
+  </property>
+  <property>
+    <name>hbase.rpc.protection</name>
+    <value>authentication</value>
+  </property>
+  <property>
+    <name>zookeeper.session.timeout</name>
+    <value>60000</value>
+  </property>
+  <property>
+    <name>zookeeper.znode.parent</name>
+    <value>/hbase</value>
+  </property>
+  <property>
+    <name>zookeeper.znode.rootserver</name>
+    <value>root-region-server</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value>cdh-master</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>2181</value>
+  </property>
+  <property>
+    <name>hbase.rest.ssl.enabled</name>
+    <value>false</value>
+  </property>
+</configuration>
diff --git a/kubernetes/example/config/hadoop/hdfs-site.xml b/kubernetes/example/config/hadoop/hdfs-site.xml
new file mode 100644
index 0000000..f8e8fa0
--- /dev/null
+++ b/kubernetes/example/config/hadoop/hdfs-site.xml
@@ -0,0 +1,69 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--Autogenerated by Cloudera Manager-->
+<configuration>
+  <property>
+    <name>dfs.namenode.name.dir</name>
+    <value>file:///dfs/nn</value>
+  </property>
+  <property>
+    <name>dfs.namenode.servicerpc-address</name>
+    <value>cdh-master:8022</value>
+  </property>
+  <property>
+    <name>dfs.https.address</name>
+    <value>cdh-master:50470</value>
+  </property>
+  <property>
+    <name>dfs.https.port</name>
+    <value>50470</value>
+  </property>
+  <property>
+    <name>dfs.namenode.http-address</name>
+    <value>cdh-master:50070</value>
+  </property>
+  <property>
+    <name>dfs.replication</name>
+    <value>1</value>
+  </property>
+  <property>
+    <name>dfs.blocksize</name>
+    <value>134217728</value>
+  </property>
+  <property>
+    <name>dfs.client.use.datanode.hostname</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>fs.permissions.umask-mode</name>
+    <value>022</value>
+  </property>
+  <property>
+    <name>dfs.namenode.acls.enabled</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>dfs.client.use.legacy.blockreader</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value>/var/run/hdfs-sockets/dn</value>
+  </property>
+  <property>
+    <name>dfs.client.read.shortcircuit.skip.checksum</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>dfs.client.domain.socket.data.traffic</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
+    <value>true</value>
+  </property>
+</configuration>
diff --git a/kubernetes/example/config/hadoop/hive-site.xml b/kubernetes/example/config/hadoop/hive-site.xml
new file mode 100644
index 0000000..598c690
--- /dev/null
+++ b/kubernetes/example/config/hadoop/hive-site.xml
@@ -0,0 +1,221 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--Autogenerated by Cloudera Manager-->
+<configuration>
+  <property>
+    <name>hive.metastore.uris</name>
+    <value>thrift://cdh-master:9083</value>
+  </property>
+  <property>
+    <name>hive.metastore.client.socket.timeout</name>
+    <value>300</value>
+  </property>
+  <property>
+    <name>hive.metastore.warehouse.dir</name>
+    <value>/user/hive/warehouse</value>
+  </property>
+  <property>
+    <name>hive.warehouse.subdir.inherit.perms</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>hive.auto.convert.join</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>hive.auto.convert.join.noconditionaltask.size</name>
+    <value>20971520</value>
+  </property>
+  <property>
+    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>hive.smbjoin.cache.rows</name>
+    <value>10000</value>
+  </property>
+  <property>
+    <name>hive.server2.logging.operation.enabled</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>hive.server2.logging.operation.log.location</name>
+    <value>/var/log/hive/operation_logs</value>
+  </property>
+  <property>
+    <name>mapred.reduce.tasks</name>
+    <value>-1</value>
+  </property>
+  <property>
+    <name>hive.exec.reducers.bytes.per.reducer</name>
+    <value>67108864</value>
+  </property>
+  <property>
+    <name>hive.exec.copyfile.maxsize</name>
+    <value>33554432</value>
+  </property>
+  <property>
+    <name>hive.exec.reducers.max</name>
+    <value>1099</value>
+  </property>
+  <property>
+    <name>hive.vectorized.groupby.checkinterval</name>
+    <value>4096</value>
+  </property>
+  <property>
+    <name>hive.vectorized.groupby.flush.percent</name>
+    <value>0.1</value>
+  </property>
+  <property>
+    <name>hive.compute.query.using.stats</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>hive.vectorized.execution.enabled</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>hive.vectorized.execution.reduce.enabled</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>hive.merge.mapfiles</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>hive.merge.mapredfiles</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>hive.cbo.enable</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>hive.fetch.task.conversion</name>
+    <value>minimal</value>
+  </property>
+  <property>
+    <name>hive.fetch.task.conversion.threshold</name>
+    <value>268435456</value>
+  </property>
+  <property>
+    <name>hive.limit.pushdown.memory.usage</name>
+    <value>0.1</value>
+  </property>
+  <property>
+    <name>hive.merge.sparkfiles</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>hive.merge.smallfiles.avgsize</name>
+    <value>16777216</value>
+  </property>
+  <property>
+    <name>hive.merge.size.per.task</name>
+    <value>268435456</value>
+  </property>
+  <property>
+    <name>hive.optimize.reducededuplication</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>hive.optimize.reducededuplication.min.reducer</name>
+    <value>4</value>
+  </property>
+  <property>
+    <name>hive.map.aggr</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>hive.map.aggr.hash.percentmemory</name>
+    <value>0.5</value>
+  </property>
+  <property>
+    <name>hive.optimize.sort.dynamic.partition</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>hive.execution.engine</name>
+    <value>mr</value>
+  </property>
+  <property>
+    <name>spark.executor.memory</name>
+    <value>10762321920</value>
+  </property>
+  <property>
+    <name>spark.driver.memory</name>
+    <value>3865470566</value>
+  </property>
+  <property>
+    <name>spark.executor.cores</name>
+    <value>5</value>
+  </property>
+  <property>
+    <name>spark.yarn.driver.memoryOverhead</name>
+    <value>409</value>
+  </property>
+  <property>
+    <name>spark.yarn.executor.memoryOverhead</name>
+    <value>1811</value>
+  </property>
+  <property>
+    <name>spark.dynamicAllocation.enabled</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>spark.dynamicAllocation.initialExecutors</name>
+    <value>1</value>
+  </property>
+  <property>
+    <name>spark.dynamicAllocation.minExecutors</name>
+    <value>1</value>
+  </property>
+  <property>
+    <name>spark.dynamicAllocation.maxExecutors</name>
+    <value>2147483647</value>
+  </property>
+  <property>
+    <name>hive.metastore.execute.setugi</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>hive.support.concurrency</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>hive.zookeeper.quorum</name>
+    <value>cdh-master</value>
+  </property>
+  <property>
+    <name>hive.zookeeper.client.port</name>
+    <value>2181</value>
+  </property>
+  <property>
+    <name>hive.zookeeper.namespace</name>
+    <value>hive_zookeeper_namespace_hive</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value>cdh-master</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>2181</value>
+  </property>
+  <property>
+    <name>hive.cluster.delegation.token.store.class</name>
+    <value>org.apache.hadoop.hive.thrift.MemoryTokenStore</value>
+  </property>
+  <property>
+    <name>hive.server2.enable.doAs</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>hive.server2.use.SSL</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>spark.shuffle.service.enabled</name>
+    <value>true</value>
+  </property>
+</configuration>
diff --git a/kubernetes/example/config/hadoop/mapred-site.xml b/kubernetes/example/config/hadoop/mapred-site.xml
new file mode 100644
index 0000000..842b7fb
--- /dev/null
+++ b/kubernetes/example/config/hadoop/mapred-site.xml
@@ -0,0 +1,177 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--Autogenerated by Cloudera Manager-->
+<configuration>
+  <property>
+    <name>mapreduce.job.split.metainfo.maxsize</name>
+    <value>10000000</value>
+  </property>
+  <property>
+    <name>mapreduce.job.counters.max</name>
+    <value>120</value>
+  </property>
+  <property>
+    <name>mapreduce.job.counters.groups.max</name>
+    <value>50</value>
+  </property>
+  <property>
+    <name>mapreduce.output.fileoutputformat.compress</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>mapreduce.output.fileoutputformat.compress.type</name>
+    <value>BLOCK</value>
+  </property>
+  <property>
+    <name>mapreduce.output.fileoutputformat.compress.codec</name>
+    <value>org.apache.hadoop.io.compress.DefaultCodec</value>
+  </property>
+  <property>
+    <name>mapreduce.map.output.compress.codec</name>
+    <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+  </property>
+  <property>
+    <name>mapreduce.map.output.compress</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>zlib.compress.level</name>
+    <value>DEFAULT_COMPRESSION</value>
+  </property>
+  <property>
+    <name>mapreduce.task.io.sort.factor</name>
+    <value>64</value>
+  </property>
+  <property>
+    <name>mapreduce.map.sort.spill.percent</name>
+    <value>0.8</value>
+  </property>
+  <property>
+    <name>mapreduce.reduce.shuffle.parallelcopies</name>
+    <value>10</value>
+  </property>
+  <property>
+    <name>mapreduce.task.timeout</name>
+    <value>600000</value>
+  </property>
+  <property>
+    <name>mapreduce.client.submit.file.replication</name>
+    <value>1</value>
+  </property>
+  <property>
+    <name>mapreduce.job.reduces</name>
+    <value>10</value>
+  </property>
+  <property>
+    <name>mapreduce.task.io.sort.mb</name>
+    <value>256</value>
+  </property>
+  <property>
+    <name>mapreduce.map.speculative</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>mapreduce.reduce.speculative</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>mapreduce.job.reduce.slowstart.completedmaps</name>
+    <value>0.8</value>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.address</name>
+    <value>cdh-master:10020</value>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.webapp.address</name>
+    <value>cdh-master:19888</value>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.webapp.https.address</name>
+    <value>cdh-master:19890</value>
+  </property>
+  <property>
+    <name>mapreduce.jobhistory.admin.address</name>
+    <value>cdh-master:10033</value>
+  </property>
+  <property>
+    <name>mapreduce.framework.name</name>
+    <value>yarn</value>
+  </property>
+  <property>
+    <name>yarn.app.mapreduce.am.staging-dir</name>
+    <value>/user</value>
+  </property>
+  <property>
+    <name>mapreduce.am.max-attempts</name>
+    <value>2</value>
+  </property>
+  <property>
+    <name>yarn.app.mapreduce.am.resource.mb</name>
+    <value>1024</value>
+  </property>
+  <property>
+    <name>yarn.app.mapreduce.am.resource.cpu-vcores</name>
+    <value>1</value>
+  </property>
+  <property>
+    <name>mapreduce.job.ubertask.enable</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>yarn.app.mapreduce.am.command-opts</name>
+    <value>-Djava.net.preferIPv4Stack=true -Xmx825955249</value>
+  </property>
+  <property>
+    <name>mapreduce.map.java.opts</name>
+    <value>-Djava.net.preferIPv4Stack=true -Xmx2560m</value>
+  </property>
+  <property>
+    <name>mapreduce.reduce.java.opts</name>
+    <value>-Djava.net.preferIPv4Stack=true -Xmx4608m</value>
+  </property>
+  <property>
+    <name>yarn.app.mapreduce.am.admin.user.env</name>
+    <value>LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native:$JAVA_LIBRARY_PATH</value>
+  </property>
+  <property>
+    <name>mapreduce.map.memory.mb</name>
+    <value>3072</value>
+  </property>
+  <property>
+    <name>mapreduce.map.cpu.vcores</name>
+    <value>1</value>
+  </property>
+  <property>
+    <name>mapreduce.reduce.memory.mb</name>
+    <value>5120</value>
+  </property>
+  <property>
+    <name>mapreduce.reduce.cpu.vcores</name>
+    <value>1</value>
+  </property>
+  <property>
+    <name>mapreduce.job.heap.memory-mb.ratio</name>
+    <value>0.8</value>
+  </property>
+  <property>
+    <name>mapreduce.application.classpath</name>
+    <value>$HADOOP_MAPRED_HOME/*,$HADOOP_MAPRED_HOME/lib/*,$MR2_CLASSPATH</value>
+  </property>
+  <property>
+    <name>mapreduce.admin.user.env</name>
+    <value>LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native:$JAVA_LIBRARY_PATH</value>
+  </property>
+  <property>
+    <name>mapreduce.job.acl-view-job</name>
+    <value> </value>
+  </property>
+  <property>
+    <name>mapreduce.job.acl-modify-job</name>
+    <value> </value>
+  </property>
+  <property>
+    <name>mapreduce.cluster.acls.enabled</name>
+    <value>false</value>
+  </property>
+</configuration>
diff --git a/kubernetes/example/config/hadoop/yarn-site.xml b/kubernetes/example/config/hadoop/yarn-site.xml
new file mode 100644
index 0000000..569cc20
--- /dev/null
+++ b/kubernetes/example/config/hadoop/yarn-site.xml
@@ -0,0 +1,121 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--Autogenerated by Cloudera Manager-->
+<configuration>
+  <property>
+    <name>yarn.acl.enable</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>yarn.admin.acl</name>
+    <value>*</value>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.address</name>
+    <value>cdh-master:8032</value>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.admin.address</name>
+    <value>cdh-master:8033</value>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.scheduler.address</name>
+    <value>cdh-master:8030</value>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.resource-tracker.address</name>
+    <value>cdh-master:8031</value>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.webapp.address</name>
+    <value>cdh-master:8088</value>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.webapp.https.address</name>
+    <value>cdh-master:8090</value>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.client.thread-count</name>
+    <value>50</value>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.scheduler.client.thread-count</name>
+    <value>50</value>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.admin.client.thread-count</name>
+    <value>1</value>
+  </property>
+  <property>
+    <name>yarn.scheduler.minimum-allocation-mb</name>
+    <value>1024</value>
+  </property>
+  <property>
+    <name>yarn.scheduler.increment-allocation-mb</name>
+    <value>512</value>
+  </property>
+  <property>
+    <name>yarn.scheduler.maximum-allocation-mb</name>
+    <value>32150</value>
+  </property>
+  <property>
+    <name>yarn.scheduler.minimum-allocation-vcores</name>
+    <value>1</value>
+  </property>
+  <property>
+    <name>yarn.scheduler.increment-allocation-vcores</name>
+    <value>1</value>
+  </property>
+  <property>
+    <name>yarn.scheduler.maximum-allocation-vcores</name>
+    <value>10</value>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.amliveliness-monitor.interval-ms</name>
+    <value>1000</value>
+  </property>
+  <property>
+    <name>yarn.am.liveness-monitor.expiry-interval-ms</name>
+    <value>600000</value>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.am.max-attempts</name>
+    <value>2</value>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.container.liveness-monitor.interval-ms</name>
+    <value>600000</value>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.nm.liveness-monitor.interval-ms</name>
+    <value>1000</value>
+  </property>
+  <property>
+    <name>yarn.nm.liveness-monitor.expiry-interval-ms</name>
+    <value>600000</value>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.resource-tracker.client.thread-count</name>
+    <value>50</value>
+  </property>
+  <property>
+    <name>yarn.application.classpath</name>
+    <value>$HADOOP_CLIENT_CONF_DIR,$HADOOP_CONF_DIR,$HADOOP_COMMON_HOME/*,$HADOOP_COMMON_HOME/lib/*,$HADOOP_HDFS_HOME/*,$HADOOP_HDFS_HOME/lib/*,$HADOOP_YARN_HOME/*,$HADOOP_YARN_HOME/lib/*</value>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.scheduler.class</name>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler</value>
+  </property>
+  <property>
+    <name>yarn.resourcemanager.max-completed-applications</name>
+    <value>10000</value>
+  </property>
+  <property>
+    <name>yarn.nodemanager.remote-app-log-dir</name>
+    <value>/tmp/logs</value>
+  </property>
+  <property>
+    <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
+    <value>logs</value>
+  </property>
+</configuration>
diff --git a/kubernetes/example/config/kylin-job/kylin-kafka-consumer.xml b/kubernetes/example/config/kylin-job/kylin-kafka-consumer.xml
new file mode 100644
index 0000000..8529a41
--- /dev/null
+++ b/kubernetes/example/config/kylin-job/kylin-kafka-consumer.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<!--
+ for more kafka consumer configs, please refer to http://kafka.apache.org/documentation#consumerconfigs
+-->
+<configuration>
+    <property>
+        <name>session.timeout.ms</name>
+        <value>10000</value>
+    </property>
+    <property>
+        <name>request.timeout.ms</name>
+        <value>20000</value>
+    </property>
+</configuration>
\ No newline at end of file
diff --git a/k8s/developments/config/kylin/kylin-server-log4j.properties b/kubernetes/example/config/kylin-job/kylin-server-log4j.properties
similarity index 100%
copy from k8s/developments/config/kylin/kylin-server-log4j.properties
copy to kubernetes/example/config/kylin-job/kylin-server-log4j.properties
diff --git a/k8s/developments/config/kylin/kylin-tools-log4j.properties b/kubernetes/example/config/kylin-job/kylin-spark-log4j.properties
similarity index 51%
copy from k8s/developments/config/kylin/kylin-tools-log4j.properties
copy to kubernetes/example/config/kylin-job/kylin-spark-log4j.properties
index 54d18c2..948fb32 100644
--- a/k8s/developments/config/kylin/kylin-tools-log4j.properties
+++ b/kubernetes/example/config/kylin-job/kylin-spark-log4j.properties
@@ -15,24 +15,29 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-
-
-# the kylin-tools-log4j.properties is mainly for configuring log properties on kylin tools, including:
-#   1. tools launched by kylin.sh script, e.g. DeployCoprocessorCLI
-#   2. DebugTomcat
-#   3. others
-#
-# It's called kylin-tools-log4j.properties so that it won't distract users from the other more important log4j config file: kylin-server-log4j.properties
-# enable this by -Dlog4j.configuration=kylin-tools-log4j.properties
-
-log4j.rootLogger=INFO,stderr
+log4j.rootCategory=WARN,stderr,stdout
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.target=System.out
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}:%L : %m%n
 
 log4j.appender.stderr=org.apache.log4j.ConsoleAppender
 log4j.appender.stderr.Target=System.err
 log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
 log4j.appender.stderr.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}:%L : %m%n
 
-#log4j.logger.org.apache.hadoop=ERROR
-log4j.logger.org.apache.kylin=DEBUG
-log4j.logger.org.springframework=WARN
-log4j.logger.org.apache.kylin.tool.shaded=INFO
\ No newline at end of file
+
+# Settings to quiet third party logs that are too verbose
+log4j.logger.org.spark-project.jetty=WARN
+log4j.logger.org.spark-project.jetty.util.component.AbstractLifeCycle=ERROR
+log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO
+log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO
+log4j.logger.org.apache.parquet=ERROR
+log4j.logger.parquet=ERROR
+
+# SPARK-9183: Settings to avoid annoying messages when looking up nonexistent UDFs in SparkSQL with Hive support
+log4j.logger.org.apache.hadoop.hive.metastore.RetryingHMSHandler=FATAL
+log4j.logger.org.apache.hadoop.hive.ql.exec.FunctionRegistry=ERROR
+log4j.logger.org.apache.spark.sql=WARN
+
+log4j.logger.org.apache.kylin=DEBUG
\ No newline at end of file
diff --git a/k8s/developments/config/kylin/kylin-tools-log4j.properties b/kubernetes/example/config/kylin-job/kylin-tools-log4j.properties
similarity index 100%
copy from k8s/developments/config/kylin/kylin-tools-log4j.properties
copy to kubernetes/example/config/kylin-job/kylin-tools-log4j.properties
diff --git a/kubernetes/example/config/kylin-job/kylin.properties b/kubernetes/example/config/kylin-job/kylin.properties
new file mode 100644
index 0000000..6fd2095
--- /dev/null
+++ b/kubernetes/example/config/kylin-job/kylin.properties
@@ -0,0 +1,422 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+kylin.cache.memcached.hosts=xxx:11211
+kylin.query.cache-signature-enabled=true
+kylin.query.lazy-query-enabled=true
+kylin.metrics.memcached.enabled=true
+kylin.query.segment-cache-enabled=false
+
+kylin.metrics.monitor-enabled=true
+kylin.metrics.reporter-query-enabled=true
+kylin.metrics.reporter-job-enabled=true
+
+# The commented values below will take effect as the default settings
+# Uncomment and override them if necessary
+
+
+
+#
+#### METADATA | ENV ###
+#
+## The metadata store in hbase
+kylin.metadata.url=kylin_metadata_k8s_poc@hbase
+#
+## metadata cache sync retry times
+#kylin.metadata.sync-retries=3
+#
+## Working folder in HDFS, better be qualified absolute path, make sure user has the right permission to this directory
+#kylin.env.hdfs-working-dir=/kylin
+#
+## DEV|QA|PROD. DEV will turn on some dev features, QA and PROD has no difference in terms of functions.
+#kylin.env=QA
+#
+## kylin zk base path
+#kylin.env.zookeeper-base-path=/kylin
+#
+#### SERVER | WEB | RESTCLIENT ###
+#
+## Kylin server mode, valid value [all, query, job]
+kylin.server.mode=job
+#
+## List of web servers in use, this enables one web server instance to sync up with other servers.
+#kylin.server.cluster-servers=localhost:7070
+#
+## Display timezone on UI,format like[GMT+N or GMT-N]
+#kylin.web.timezone=
+#
+## Timeout value for the queries submitted through the Web UI, in milliseconds
+#kylin.web.query-timeout=300000
+#
+#kylin.web.cross-domain-enabled=true
+#
+##allow user to export query result
+#kylin.web.export-allow-admin=true
+#kylin.web.export-allow-other=true
+#
+## Hide measures in measure list of cube designer, separate by comma
+#kylin.web.hide-measures=RAW
+#
+##max connections of one route
+#kylin.restclient.connection.default-max-per-route=20
+#
+##max connections of one rest-client
+#kylin.restclient.connection.max-total=200
+#
+#### PUBLIC CONFIG ###
+#kylin.engine.default=2
+#kylin.storage.default=2
+#kylin.web.hive-limit=20
+#kylin.web.help.length=4
+#kylin.web.help.0=start|Getting Started|http://kylin.apache.org/docs/tutorial/kylin_sample.html
+#kylin.web.help.1=odbc|ODBC Driver|http://kylin.apache.org/docs/tutorial/odbc.html
+#kylin.web.help.2=tableau|Tableau Guide|http://kylin.apache.org/docs/tutorial/tableau_91.html
+#kylin.web.help.3=onboard|Cube Design Tutorial|http://kylin.apache.org/docs/howto/howto_optimize_cubes.html
+#kylin.web.link-streaming-guide=http://kylin.apache.org/
+#kylin.htrace.show-gui-trace-toggle=false
+#kylin.web.link-hadoop=
+#kylin.web.link-diagnostic=
+#kylin.web.contact-mail=
+#kylin.server.external-acl-provider=
+#
+## Default time filter for job list, 0->current day, 1->last one day, 2->last one week, 3->last one year, 4->all
+#kylin.web.default-time-filter=1
+#
+#### SOURCE ###
+#
+## Hive client, valid value [cli, beeline]
+#kylin.source.hive.client=cli
+#
+## Absolute path to beeline shell, can be set to spark beeline instead of the default hive beeline on PATH
+#kylin.source.hive.beeline-shell=beeline
+#
+## Parameters for beeline client, only necessary if hive client is beeline
+##kylin.source.hive.beeline-params=-n root --hiveconf hive.security.authorization.sqlstd.confwhitelist.append='mapreduce.job.*|dfs.*' -u jdbc:hive2://localhost:10000
+#
+## While hive client uses above settings to read hive table metadata,
+## table operations can go through a separate SparkSQL command line, given SparkSQL connects to the same Hive metastore.
+#kylin.source.hive.enable-sparksql-for-table-ops=false
+##kylin.source.hive.sparksql-beeline-shell=/path/to/spark-client/bin/beeline
+##kylin.source.hive.sparksql-beeline-params=-n root --hiveconf hive.security.authorization.sqlstd.confwhitelist.append='mapreduce.job.*|dfs.*' -u jdbc:hive2://localhost:10000
+#
+#kylin.source.hive.keep-flat-table=false
+#
+## Hive database name for putting the intermediate flat tables
+#kylin.source.hive.database-for-flat-table=default
+#
+## Whether redistribute the intermediate flat table before building
+#kylin.source.hive.redistribute-flat-table=true
+#
+#
+#### STORAGE ###
+#
+## The storage for final cube file in hbase
+#kylin.storage.url=hbase
+#
+## The prefix of hbase table
+kylin.storage.hbase.table-name-prefix=K8S_
+#
+## The namespace for hbase storage
+kylin.storage.hbase.namespace=lacus
+#
+## Compression codec for htable, valid value [none, snappy, lzo, gzip, lz4]
+#kylin.storage.hbase.compression-codec=none
+#
+## HBase Cluster FileSystem, which serving hbase, format as hdfs://hbase-cluster:8020
+## Leave empty if hbase running on same cluster with hive and mapreduce
+##kylin.storage.hbase.cluster-fs=
+#
+## The cut size for hbase region, in GB.
+#kylin.storage.hbase.region-cut-gb=5
+#
+## The hfile size of GB, smaller hfile leading to the converting hfile MR has more reducers and be faster.
+## Set 0 to disable this optimization.
+#kylin.storage.hbase.hfile-size-gb=2
+#
+#kylin.storage.hbase.min-region-count=1
+#kylin.storage.hbase.max-region-count=500
+#
+## Optional information for the owner of kylin platform, it can be your team's email
+## Currently it will be attached to each kylin's htable attribute
+#kylin.storage.hbase.owner-tag=whoami@kylin.apache.org
+#
+#kylin.storage.hbase.coprocessor-mem-gb=3
+#
+## By default kylin can spill query's intermediate results to disks when it's consuming too much memory.
+## Set it to false if you want query to abort immediately in such condition.
+#kylin.storage.partition.aggr-spill-enabled=true
+#
+## The maximum number of bytes each coprocessor is allowed to scan.
+## To allow arbitrary large scan, you can set it to 0.
+#kylin.storage.partition.max-scan-bytes=3221225472
+#
+## The default coprocessor timeout is (hbase.rpc.timeout * 0.9) / 1000 seconds,
+## You can set it to a smaller value. 0 means use default.
+## kylin.storage.hbase.coprocessor-timeout-seconds=0
+#
+## clean real storage after delete operation
+## if you want to delete the real storage like htable of deleting segment, you can set it to true
+#kylin.storage.clean-after-delete-operation=false
+#
+#### JOB ###
+#
+## Max job retry on error, default 0: no retry
+#kylin.job.retry=0
+#
+## Max count of concurrent jobs running
+#kylin.job.max-concurrent-jobs=10
+#
+## The percentage of the sampling, default 100%
+#kylin.job.sampling-percentage=100
+#
+## If true, will send email notification on job complete
+##kylin.job.notification-enabled=true
+##kylin.job.notification-mail-enable-starttls=true
+##kylin.job.notification-mail-host=smtp.office365.com
+##kylin.job.notification-mail-port=587
+##kylin.job.notification-mail-username=kylin@example.com
+##kylin.job.notification-mail-password=mypassword
+##kylin.job.notification-mail-sender=kylin@example.com
+kylin.job.scheduler.provider.100=org.apache.kylin.job.impl.curator.CuratorScheduler
+kylin.job.scheduler.default=100
+#
+#### ENGINE ###
+#
+## Time interval to check hadoop job status
+#kylin.engine.mr.yarn-check-interval-seconds=10
+#
+#kylin.engine.mr.reduce-input-mb=500
+#
+#kylin.engine.mr.max-reducer-number=500
+#
+#kylin.engine.mr.mapper-input-rows=1000000
+#
+## Enable dictionary building in MR reducer
+#kylin.engine.mr.build-dict-in-reducer=true
+#
+## Number of reducers for fetching UHC column distinct values
+#kylin.engine.mr.uhc-reducer-count=3
+#
+## Whether using an additional step to build UHC dictionary
+#kylin.engine.mr.build-uhc-dict-in-additional-step=false
+#
+#
+#### CUBE | DICTIONARY ###
+#
+#kylin.cube.cuboid-scheduler=org.apache.kylin.cube.cuboid.DefaultCuboidScheduler
+#kylin.cube.segment-advisor=org.apache.kylin.cube.CubeSegmentAdvisor
+#
+## 'auto', 'inmem', 'layer' or 'random' for testing 
+#kylin.cube.algorithm=layer
+#
+## A smaller threshold prefers layer, a larger threshold prefers in-mem
+#kylin.cube.algorithm.layer-or-inmem-threshold=7
+#
+## auto use inmem algorithm:
+## 1, cube planner optimize job
+## 2, no source record
+#kylin.cube.algorithm.inmem-auto-optimize=true
+#
+#kylin.cube.aggrgroup.max-combination=32768
+#
+#kylin.snapshot.max-mb=300
+#
+#kylin.cube.cubeplanner.enabled=true
+#kylin.cube.cubeplanner.enabled-for-existing-cube=true
+#kylin.cube.cubeplanner.expansion-threshold=15.0
+#kylin.cube.cubeplanner.recommend-cache-max-size=200
+#kylin.cube.cubeplanner.mandatory-rollup-threshold=1000
+#kylin.cube.cubeplanner.algorithm-threshold-greedy=8
+#kylin.cube.cubeplanner.algorithm-threshold-genetic=23
+#
+#
+#### QUERY ###
+#
+## Controls the maximum number of bytes a query is allowed to scan storage.
+## The default value 0 means no limit.
+## The counterpart kylin.storage.partition.max-scan-bytes sets the maximum per coprocessor.
+#kylin.query.max-scan-bytes=0
+#
+kylin.query.cache-enabled=true
+#
+## Controls extras properties for Calcite jdbc driver
+## all extras properties should be under the prefix "kylin.query.calcite.extras-props."
+## case sensitive, default: true, to enable case insensitive set it to false
+## @see org.apache.calcite.config.CalciteConnectionProperty.CASE_SENSITIVE
+#kylin.query.calcite.extras-props.caseSensitive=true
+## how to handle unquoted identifiers, default: TO_UPPER, available options: UNCHANGED, TO_UPPER, TO_LOWER
+## @see org.apache.calcite.config.CalciteConnectionProperty.UNQUOTED_CASING
+#kylin.query.calcite.extras-props.unquotedCasing=TO_UPPER
+## quoting method, default: DOUBLE_QUOTE, available options: DOUBLE_QUOTE, BACK_TICK, BRACKET
+## @see org.apache.calcite.config.CalciteConnectionProperty.QUOTING
+#kylin.query.calcite.extras-props.quoting=DOUBLE_QUOTE
+## change SqlConformance from DEFAULT to LENIENT to enable group by ordinal
+## @see org.apache.calcite.sql.validate.SqlConformance.SqlConformanceEnum
+#kylin.query.calcite.extras-props.conformance=LENIENT
+#
+## TABLE ACL
+#kylin.query.security.table-acl-enabled=true
+#
+## Usually should not modify this
+#kylin.query.interceptors=org.apache.kylin.rest.security.TableInterceptor
+#
+#kylin.query.escape-default-keyword=false
+#
+## Usually should not modify this
+#kylin.query.transformers=org.apache.kylin.query.util.DefaultQueryTransformer,org.apache.kylin.query.util.KeywordDefaultDirtyHack
+#
+#### SECURITY ###
+#
+## Spring security profile, options: testing, ldap, saml
+## with "testing" profile, user can use pre-defined name/pwd like KYLIN/ADMIN to login
+#kylin.security.profile=testing
+#
+## Admin roles in LDAP, for ldap and saml
+#kylin.security.acl.admin-role=admin
+#
+## LDAP authentication configuration
+#kylin.security.ldap.connection-server=ldap://ldap_server:389
+#kylin.security.ldap.connection-username=
+#kylin.security.ldap.connection-password=
+#
+## LDAP user account directory;
+#kylin.security.ldap.user-search-base=
+#kylin.security.ldap.user-search-pattern=
+#kylin.security.ldap.user-group-search-base=
+#kylin.security.ldap.user-group-search-filter=(|(member={0})(memberUid={1}))
+#
+## LDAP service account directory
+#kylin.security.ldap.service-search-base=
+#kylin.security.ldap.service-search-pattern=
+#kylin.security.ldap.service-group-search-base=
+#
+### SAML configurations for SSO
+## SAML IDP metadata file location
+#kylin.security.saml.metadata-file=classpath:sso_metadata.xml
+#kylin.security.saml.metadata-entity-base-url=https://hostname/kylin
+#kylin.security.saml.keystore-file=classpath:samlKeystore.jks
+#kylin.security.saml.context-scheme=https
+#kylin.security.saml.context-server-name=hostname
+#kylin.security.saml.context-server-port=443
+#kylin.security.saml.context-path=/kylin
+#
+#### SPARK ENGINE CONFIGS ###
+#
+## Hadoop conf folder, will export this as "HADOOP_CONF_DIR" to run spark-submit
+## This must contain site xmls of core, yarn, hive, and hbase in one folder
+##kylin.env.hadoop-conf-dir=/etc/hadoop/conf
+#
+## Estimate the RDD partition numbers
+#kylin.engine.spark.rdd-partition-cut-mb=10
+#
+## Minimal partition numbers of rdd
+#kylin.engine.spark.min-partition=1
+#
+## Max partition numbers of rdd
+#kylin.engine.spark.max-partition=5000
+#
+## Spark conf (default is in spark/conf/spark-defaults.conf)
+kylin.engine.spark-conf.spark.master=yarn
+## Use cluster as deploy mode
+kylin.engine.spark-conf.spark.submit.deployMode=cluster
+#kylin.engine.spark-conf.spark.yarn.queue=default
+#kylin.engine.spark-conf.spark.driver.memory=2G
+#kylin.engine.spark-conf.spark.executor.memory=4G
+#kylin.engine.spark-conf.spark.executor.instances=40
+#kylin.engine.spark-conf.spark.yarn.executor.memoryOverhead=1024
+#kylin.engine.spark-conf.spark.shuffle.service.enabled=true
+#kylin.engine.spark-conf.spark.eventLog.enabled=true
+#kylin.engine.spark-conf.spark.eventLog.dir=hdfs\:///kylin/spark-history
+#kylin.engine.spark-conf.spark.history.fs.logDirectory=hdfs\:///kylin/spark-history
+#kylin.engine.spark-conf.spark.hadoop.yarn.timeline-service.enabled=false
+#
+#### Spark conf for specific job
+#kylin.engine.spark-conf-mergedict.spark.executor.memory=6G
+#kylin.engine.spark-conf-mergedict.spark.memory.fraction=0.2
+#
+## manually upload spark-assembly jar to HDFS and then set this property will avoid repeatedly uploading jar at runtime
+##kylin.engine.spark-conf.spark.yarn.archive=hdfs://namenode:8020/kylin/spark/spark-libs.jar
+##kylin.engine.spark-conf.spark.io.compression.codec=org.apache.spark.io.SnappyCompressionCodec
+#
+## uncomment for HDP
+##kylin.engine.spark-conf.spark.driver.extraJavaOptions=-Dhdp.version=current
+##kylin.engine.spark-conf.spark.yarn.am.extraJavaOptions=-Dhdp.version=current
+##kylin.engine.spark-conf.spark.executor.extraJavaOptions=-Dhdp.version=current
+#
+#
+#### QUERY PUSH DOWN ###
+#
+##kylin.query.pushdown.runner-class-name=org.apache.kylin.query.adhoc.PushDownRunnerJdbcImpl
+#
+##kylin.query.pushdown.update-enabled=false
+##kylin.query.pushdown.jdbc.url=jdbc:hive2://sandbox:10000/default
+##kylin.query.pushdown.jdbc.driver=org.apache.hive.jdbc.HiveDriver
+##kylin.query.pushdown.jdbc.username=hive
+##kylin.query.pushdown.jdbc.password=
+#
+##kylin.query.pushdown.jdbc.pool-max-total=8
+##kylin.query.pushdown.jdbc.pool-max-idle=8
+##kylin.query.pushdown.jdbc.pool-min-idle=0
+#
+#### JDBC Data Source
+##kylin.source.jdbc.connection-url=
+##kylin.source.jdbc.driver=
+##kylin.source.jdbc.dialect=
+##kylin.source.jdbc.user=
+##kylin.source.jdbc.pass=
+##kylin.source.jdbc.sqoop-home=
+##kylin.source.jdbc.filed-delimiter=|
+#
+#### Livy with Kylin
+##kylin.engine.livy-conf.livy-enabled=false
+##kylin.engine.livy-conf.livy-url=http://LivyHost:8998
+##kylin.engine.livy-conf.livy-key.file=hdfs:///path-to-kylin-job-jar
+##kylin.engine.livy-conf.livy-arr.jars=hdfs:///path-to-hadoop-dependency-jar
+#
+#
+#### Realtime OLAP ###
+#
+## Where should local segment cache located, for absolute path, the real path will be ${KYLIN_HOME}/${kylin.stream.index.path}
+#kylin.stream.index.path=stream_index
+#
+## The timezone for Derived Time Column like hour_start, try set to GMT+N, please check detail at KYLIN-4010
+#kylin.stream.event.timezone=
+#
+## Debug switch for print realtime global dict encode information, please check detail at KYLIN-4141
+#kylin.stream.print-realtime-dict-enabled=false
+#
+## Should enable latest coordinator, please check detail at KYLIN-4167
+#kylin.stream.new.coordinator-enabled=true
+#
+## In which way should we collect receiver's metrics info
+##kylin.stream.metrics.option=console/csv/jmx
+#
+## When enabling a streaming cube, whether to consume from the earliest offset or the latest offset
+#kylin.stream.consume.offsets.latest=true
+#
+## The parallelism of scan in receiver side
+#kylin.stream.receiver.use-threads-per-query=8
+#
+## How coordinator/receiver register itself into StreamMetadata, there are three option:
+## 1. hostname:port, then kylin will set the config ip and port as the currentNode;
+## 2. port, then kylin will get the node's hostname and append port as the currentNode;
+## 3. not set, then kylin will get the node hostname address and set the hostname and defaultPort(7070 for coordinator or 9090 for receiver) as the currentNode.
+##kylin.stream.node=
+#
+## Auto resubmit after job be discarded
+#kylin.stream.auto-resubmit-after-discard-enabled=true
diff --git a/kubernetes/example/config/kylin-job/kylin_hive_conf.xml b/kubernetes/example/config/kylin-job/kylin_hive_conf.xml
new file mode 100644
index 0000000..f01d08e
--- /dev/null
+++ b/kubernetes/example/config/kylin-job/kylin_hive_conf.xml
@@ -0,0 +1,102 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+    <property>
+        <name>dfs.replication</name>
+        <value>2</value>
+        <description>Block replication</description>
+    </property>
+
+    <property>
+        <name>hive.exec.compress.output</name>
+        <value>true</value>
+        <description>Enable compress</description>
+    </property>
+
+    <property>
+        <name>hive.auto.convert.join</name>
+        <value>true</value>
+        <description>Enables the optimization about converting common join into mapjoin</description>
+    </property>
+
+    <property>
+        <name>hive.auto.convert.join.noconditionaltask</name>
+        <value>true</value>
+        <description>enable map-side join</description>
+    </property>
+
+    <property>
+        <name>hive.auto.convert.join.noconditionaltask.size</name>
+        <value>100000000</value>
+        <description>enable map-side join</description>
+    </property>
+
+    <!--
+    The default map outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.map.output.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description></description>
+    </property>
+    -->
+    <!--
+    The default job outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description></description>
+    </property>
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.type</name>
+        <value>BLOCK</value>
+        <description>The compression type to use for job outputs</description>
+    </property>
+
+    -->
+    <property>
+        <name>mapreduce.job.split.metainfo.maxsize</name>
+        <value>-1</value>
+        <description>The maximum permissible size of the split metainfo file.
+            The JobTracker won't attempt to read split metainfo files bigger than
+            the configured value. No limits if set to -1.
+        </description>
+    </property>
+
+    <property>
+        <name>hive.stats.autogather</name>
+        <value>true</value>
+        <description>Collect statistics for newly created intermediate table</description>
+    </property>
+
+    <property>
+        <name>hive.merge.mapfiles</name>
+        <value>false</value>
+        <description>Disable Hive's auto merge</description>
+    </property>
+
+    <property>
+        <name>hive.merge.mapredfiles</name>
+        <value>false</value>
+        <description>Disable Hive's auto merge</description>
+    </property>
+</configuration>
diff --git a/kubernetes/example/config/kylin-job/kylin_job_conf.xml b/kubernetes/example/config/kylin-job/kylin_job_conf.xml
new file mode 100644
index 0000000..17a9145
--- /dev/null
+++ b/kubernetes/example/config/kylin-job/kylin_job_conf.xml
@@ -0,0 +1,88 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+
+    <property>
+        <name>mapreduce.job.split.metainfo.maxsize</name>
+        <value>-1</value>
+        <description>The maximum permissible size of the split metainfo file.
+            The JobTracker won't attempt to read split metainfo files bigger than
+            the configured value. No limits if set to -1.
+        </description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.output.compress</name>
+        <value>true</value>
+        <description>Compress map outputs</description>
+    </property>
+
+    <!--
+    The default map outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.map.output.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for map outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress</name>
+        <value>true</value>
+        <description>Compress the output of a MapReduce job</description>
+    </property>
+    <!--
+    The default job outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for job outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.type</name>
+        <value>BLOCK</value>
+        <description>The compression type to use for job outputs</description>
+    </property>
+
+
+    <property>
+        <name>mapreduce.job.max.split.locations</name>
+        <value>2000</value>
+        <description>No description</description>
+    </property>
+
+    <property>
+        <name>dfs.replication</name>
+        <value>2</value>
+        <description>Block replication</description>
+    </property>
+
+    <property>
+        <name>mapreduce.task.timeout</name>
+        <value>3600000</value>
+        <description>Set task timeout to 1 hour</description>
+    </property>
+
+</configuration>
diff --git a/kubernetes/example/config/kylin-job/kylin_job_conf_cube_merge.xml b/kubernetes/example/config/kylin-job/kylin_job_conf_cube_merge.xml
new file mode 100644
index 0000000..79365ad
--- /dev/null
+++ b/kubernetes/example/config/kylin-job/kylin_job_conf_cube_merge.xml
@@ -0,0 +1,104 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+    <property>
+        <name>mapreduce.job.split.metainfo.maxsize</name>
+        <value>-1</value>
+        <description>The maximum permissible size of the split metainfo file.
+            The JobTracker won't attempt to read split metainfo files bigger than
+            the configured value. No limits if set to -1.
+        </description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.output.compress</name>
+        <value>true</value>
+        <description>Compress map outputs</description>
+    </property>
+
+    <!--
+    The default map outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.map.output.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for map outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress</name>
+        <value>true</value>
+        <description>Compress the output of a MapReduce job</description>
+    </property>
+    <!--
+    The default job outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for job outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.type</name>
+        <value>BLOCK</value>
+        <description>The compression type to use for job outputs</description>
+    </property>
+
+    <property>
+        <name>mapreduce.job.max.split.locations</name>
+        <value>2000</value>
+        <description>No description</description>
+    </property>
+
+    <property>
+        <name>dfs.replication</name>
+        <value>2</value>
+        <description>Block replication</description>
+    </property>
+
+    <property>
+        <name>mapreduce.task.timeout</name>
+        <value>7200000</value>
+        <description>Set task timeout to 2 hours</description>
+    </property>
+
+    <!--Additional config for cube merge job, giving more memory -->
+    <property>
+        <name>mapreduce.map.memory.mb</name>
+        <value>3072</value>
+        <description></description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.java.opts</name>
+        <value>-Xmx2700m -XX:OnOutOfMemoryError='kill -9 %p'</value>
+        <description></description>
+    </property>
+
+    <property>
+        <name>mapreduce.task.io.sort.mb</name>
+        <value>200</value>
+        <description></description>
+    </property>
+</configuration>
diff --git a/kubernetes/example/config/kylin-job/kylin_job_conf_inmem.xml b/kubernetes/example/config/kylin-job/kylin_job_conf_inmem.xml
new file mode 100644
index 0000000..ddda4dd
--- /dev/null
+++ b/kubernetes/example/config/kylin-job/kylin_job_conf_inmem.xml
@@ -0,0 +1,111 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+    <property>
+        <name>mapreduce.job.is-mem-hungry</name>
+        <value>true</value>
+    </property>
+
+    <property>
+        <name>mapreduce.job.split.metainfo.maxsize</name>
+        <value>-1</value>
+        <description>The maximum permissible size of the split metainfo file.
+            The JobTracker won't attempt to read split metainfo files bigger than
+            the configured value. No limits if set to -1.
+        </description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.output.compress</name>
+        <value>true</value>
+        <description>Compress map outputs</description>
+    </property>
+
+    <!--
+    The default map outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.map.output.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for map outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress</name>
+        <value>true</value>
+        <description>Compress the output of a MapReduce job</description>
+    </property>
+    <!--
+    The default job outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
+    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
+    -->
+    <!--
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.codec</name>
+        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
+        <description>The compression codec to use for job outputs
+        </description>
+    </property>
+    -->
+    <property>
+        <name>mapreduce.output.fileoutputformat.compress.type</name>
+        <value>BLOCK</value>
+        <description>The compression type to use for job outputs</description>
+    </property>
+
+
+    <property>
+        <name>mapreduce.job.max.split.locations</name>
+        <value>2000</value>
+        <description>No description</description>
+    </property>
+
+    <property>
+        <name>dfs.replication</name>
+        <value>2</value>
+        <description>Block replication</description>
+    </property>
+
+    <property>
+        <name>mapreduce.task.timeout</name>
+        <value>7200000</value>
+        <description>Set task timeout to 2 hours</description>
+    </property>
+
+    <!--Additional config for in-mem cubing, giving mapper more memory -->
+    <property>
+        <name>mapreduce.map.memory.mb</name>
+        <value>3072</value>
+        <description></description>
+    </property>
+
+    <property>
+        <name>mapreduce.map.java.opts</name>
+        <value>-Xmx2700m -XX:OnOutOfMemoryError='kill -9 %p'</value>
+        <description></description>
+    </property>
+
+    <property>
+        <name>mapreduce.task.io.sort.mb</name>
+        <value>200</value>
+        <description></description>
+    </property>
+
+</configuration>
diff --git a/kubernetes/example/config/kylin-job/setenv-tool.sh b/kubernetes/example/config/kylin-job/setenv-tool.sh
new file mode 100644
index 0000000..487b5ef
--- /dev/null
+++ b/kubernetes/example/config/kylin-job/setenv-tool.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# source me
+
+# (if you're deploying KYLIN on a powerful server and want to replace the default conservative settings)
+# uncomment the following for it to take effect
+export KYLIN_JVM_SETTINGS="-Xms1024M -Xmx4096M -Xss1024K -XX:MaxPermSize=512M -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.$$ -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=64M"
+
+# Newer versions of glibc use an arena memory allocator that causes virtual
+# memory usage to explode. Tune the variable down to prevent vmem explosion.
+# See HADOOP-7154.
+export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
+
+# export KYLIN_JVM_SETTINGS="-Xms16g -Xmx16g -XX:MaxPermSize=512m -XX:NewSize=3g -XX:MaxNewSize=3g -XX:SurvivorRatio=4 -XX:+CMSClassUnloadingEnabled -XX:+CMSParallelRemarkEnabled -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:+DisableExplicitGC -XX:+HeapDumpOnOutOfMemoryError -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.$$ -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFi [...]
+
+# uncomment the following for it to take effect (the values need adjusting to fit your env)
+# export KYLIN_DEBUG_SETTINGS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
+
+# when running on HDP, try to determine the software stack version and set the hdp.version JVM property
+if [[ -d "/usr/hdp/current/hadoop-client" ]]
+then
+   export KYLIN_EXTRA_START_OPTS="-Dhdp.version=`ls -l /usr/hdp/current/hadoop-client | awk -F'/' '{print $8}'`"
+   # attempt to locate JVM native libraries and set corresponding property
+   if [[ -d "/usr/hdp/current/hadoop-client/lib/native" ]]
+   then
+      export KYLIN_LD_LIBRARY_SETTINGS="-Djava.library.path=/usr/hdp/current/hadoop-client/lib/native"
+   fi
+else
+   export KYLIN_EXTRA_START_OPTS=""
+   # uncomment the following line to set JVM native library path, the values need to reflect your environment and hardware architecture
+   # export KYLIN_LD_LIBRARY_SETTINGS="-Djava.library.path=/apache/hadoop/lib/native/Linux-amd64-64"
+fi
+
+if [ ! -z "${KYLIN_JVM_SETTINGS}" ]
+then
+    verbose "KYLIN_JVM_SETTINGS is ${KYLIN_JVM_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_JVM_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_JVM_SETTINGS is not set, using default jvm settings: ${KYLIN_JVM_SETTINGS}"
+fi
+
+if [ ! -z "${KYLIN_DEBUG_SETTINGS}" ]
+then
+    verbose "KYLIN_DEBUG_SETTINGS is ${KYLIN_DEBUG_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_DEBUG_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_DEBUG_SETTINGS is not set, will not enable remote debuging"
+fi
+
+if [ ! -z "${KYLIN_LD_LIBRARY_SETTINGS}" ]
+then
+    verbose "KYLIN_LD_LIBRARY_SETTINGS is ${KYLIN_LD_LIBRARY_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_LD_LIBRARY_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_LD_LIBRARY_SETTINGS is not set, it is okay unless you want to specify your own native path"
+fi
diff --git a/kubernetes/example/config/kylin-job/setenv.sh b/kubernetes/example/config/kylin-job/setenv.sh
new file mode 100644
index 0000000..fa88769
--- /dev/null
+++ b/kubernetes/example/config/kylin-job/setenv.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# source me
+
+# (if you're deploying KYLIN on a powerful server and want to replace the default conservative settings)
+# uncomment the following for it to take effect
+export KYLIN_JVM_SETTINGS="-Xms1024M -Xmx2048M -Xss1024K -XX:MaxPermSize=512M -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.%p -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=64M"
+
+# Newer versions of glibc use an arena memory allocator that causes virtual
+# memory usage to explode. Tune the variable down to prevent vmem explosion.
+# See HADOOP-7154.
+export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
+
+# export KYLIN_JVM_SETTINGS="-Xms16g -Xmx16g -XX:MaxPermSize=512m -XX:NewSize=3g -XX:MaxNewSize=3g -XX:SurvivorRatio=4 -XX:+CMSClassUnloadingEnabled -XX:+CMSParallelRemarkEnabled -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:+DisableExplicitGC -XX:+HeapDumpOnOutOfMemoryError -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.%p -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFi [...]
+
+# uncomment the following for it to take effect (the values need adjusting to fit your env)
+# export KYLIN_DEBUG_SETTINGS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
+
+# when running on HDP, try to determine the software stack version and set the hdp.version JVM property
+if [[ -d "/usr/hdp/current/hadoop-client" ]]
+then
+   export KYLIN_EXTRA_START_OPTS="-Dhdp.version=`ls -l /usr/hdp/current/hadoop-client | awk -F'/' '{print $8}'`"
+   # attempt to locate JVM native libraries and set corresponding property
+   if [[ -d "/usr/hdp/current/hadoop-client/lib/native" ]]
+   then
+      export KYLIN_LD_LIBRARY_SETTINGS="-Djava.library.path=/usr/hdp/current/hadoop-client/lib/native"
+   fi
+else
+   export KYLIN_EXTRA_START_OPTS=""
+   # uncomment the following line to set JVM native library path, the values need to reflect your environment and hardware architecture
+   # export KYLIN_LD_LIBRARY_SETTINGS="-Djava.library.path=/apache/hadoop/lib/native/Linux-amd64-64"
+fi
+
+if [ ! -z "${KYLIN_JVM_SETTINGS}" ]
+then
+    verbose "KYLIN_JVM_SETTINGS is ${KYLIN_JVM_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_JVM_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_JVM_SETTINGS is not set, using default jvm settings: ${KYLIN_JVM_SETTINGS}"
+fi
+
+if [ ! -z "${KYLIN_DEBUG_SETTINGS}" ]
+then
+    verbose "KYLIN_DEBUG_SETTINGS is ${KYLIN_DEBUG_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_DEBUG_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_DEBUG_SETTINGS is not set, will not enable remote debuging"
+fi
+
+if [ ! -z "${KYLIN_LD_LIBRARY_SETTINGS}" ]
+then
+    verbose "KYLIN_LD_LIBRARY_SETTINGS is ${KYLIN_LD_LIBRARY_SETTINGS}"
+    KYLIN_EXTRA_START_OPTS="${KYLIN_LD_LIBRARY_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
+else
+    verbose "KYLIN_LD_LIBRARY_SETTINGS is not set, it is okay unless you want to specify your own native path"
+fi
diff --git a/kubernetes/example/config/kylin-more/applicationContext.xml b/kubernetes/example/config/kylin-more/applicationContext.xml
new file mode 100644
index 0000000..5397044
--- /dev/null
+++ b/kubernetes/example/config/kylin-more/applicationContext.xml
@@ -0,0 +1,124 @@
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<beans xmlns="http://www.springframework.org/schema/beans"
+       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xmlns:context="http://www.springframework.org/schema/context"
+       xmlns:mvc="http://www.springframework.org/schema/mvc"
+       xmlns:aop="http://www.springframework.org/schema/aop"
+       xmlns:cache="http://www.springframework.org/schema/cache"
+       xmlns:p="http://www.springframework.org/schema/p"
+       xsi:schemaLocation="http://www.springframework.org/schema/beans
+    http://www.springframework.org/schema/beans/spring-beans-4.3.xsd
+    http://www.springframework.org/schema/context
+    http://www.springframework.org/schema/context/spring-context-4.3.xsd
+
+
+    http://www.springframework.org/schema/mvc
+    http://www.springframework.org/schema/mvc/spring-mvc-4.3.xsd
+    http://www.springframework.org/schema/aop
+    http://www.springframework.org/schema/aop/spring-aop-4.3.xsd
+    http://www.springframework.org/schema/cache
+    http://www.springframework.org/schema/cache/spring-cache.xsd">
+
+    <description>Kylin Rest Service</description>
+    <context:annotation-config/>
+    <mvc:annotation-driven/>
+    <aop:aspectj-autoproxy/>
+
+    <bean class="org.apache.kylin.rest.init.InitialTaskManager"/>
+
+    <context:component-scan base-package="org.apache.kylin.rest"/>
+
+    <bean class="org.apache.kylin.rest.security.PasswordPlaceholderConfigurer">
+        <property name="ignoreResourceNotFound" value="true"/>
+    </bean>
+
+
+    <!-- Rest service binding -->
+    <bean class="org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerMapping"/>
+
+    <bean id="mappingJacksonHttpMessageConverter"
+          class="org.springframework.http.converter.json.MappingJackson2HttpMessageConverter"/>
+    <bean id="stringHttpMessageConverter"
+          class="org.springframework.http.converter.StringHttpMessageConverter"/>
+    <bean id="formHttpMessageConverter"
+          class="org.springframework.http.converter.FormHttpMessageConverter"/>
+
+    <bean class="org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter">
+        <property name="messageConverters">
+            <list>
+                <ref bean="mappingJacksonHttpMessageConverter"/>
+                <ref bean="stringHttpMessageConverter"/>
+                <ref bean="formHttpMessageConverter"/>
+            </list>
+        </property>
+    </bean>
+
+    <bean class="org.springframework.web.servlet.view.ContentNegotiatingViewResolver">
+        <property name="viewResolvers">
+            <list>
+                <bean class="org.springframework.web.servlet.view.BeanNameViewResolver"/>
+                <bean class="org.springframework.web.servlet.view.InternalResourceViewResolver">
+                    <!-- <property name="prefix" value="/WEB-INF/jsp/"/> -->
+                    <property name="suffix" value=".jsp"/>
+                </bean>
+            </list>
+        </property>
+        <property name="defaultViews">
+            <list>
+                <bean class="org.springframework.web.servlet.view.json.MappingJackson2JsonView"/>
+            </list>
+        </property>
+    </bean>
+    <!-- Rest service binding -->
+
+    <!-- Cache Config -->
+    <cache:annotation-driven/>
+
+    <beans profile="ldap,saml">
+        <bean id="ehcache"
+              class="org.springframework.cache.ehcache.EhCacheManagerFactoryBean"
+              p:configLocation="classpath:ehcache.xml" p:shared="true"/>
+
+        <bean id="cacheManager" class="org.springframework.cache.ehcache.EhCacheCacheManager"
+              p:cacheManager-ref="ehcache"/>
+    </beans>
+    <beans profile="testing">
+        <!--
+        <bean id="ehcache"
+              class="org.springframework.cache.ehcache.EhCacheManagerFactoryBean"
+              p:configLocation="classpath:ehcache-test.xml" p:shared="true"/>
+
+        <bean id="cacheManager" class="org.springframework.cache.ehcache.EhCacheCacheManager"
+              p:cacheManager-ref="ehcache"/>
+        -->
+
+
+        <bean id="ehcache" class="org.springframework.cache.ehcache.EhCacheManagerFactoryBean"
+              p:configLocation="classpath:ehcache-test.xml" p:shared="true"/>
+
+        <bean id="remoteCacheManager" class="org.apache.kylin.cache.cachemanager.MemcachedCacheManager"/>
+        <bean id="localCacheManager" class="org.apache.kylin.cache.cachemanager.InstrumentedEhCacheCacheManager"
+              p:cacheManager-ref="ehcache"/>
+        <bean id="cacheManager" class="org.apache.kylin.cache.cachemanager.RemoteLocalFailOverCacheManager"/>
+
+        <bean id="memcachedCacheConfig" class="org.apache.kylin.cache.memcached.MemcachedCacheConfig">
+            <property name="timeout" value="500"/>
+            <property name="hosts" value="${kylin.cache.memcached.hosts}"/>
+        </bean>
+
+    </beans>
+
+</beans>
\ No newline at end of file
diff --git a/kubernetes/example/config/kylin-more/ehcache-test.xml b/kubernetes/example/config/kylin-more/ehcache-test.xml
new file mode 100644
index 0000000..5bd4d13
--- /dev/null
+++ b/kubernetes/example/config/kylin-more/ehcache-test.xml
@@ -0,0 +1,30 @@
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<ehcache maxBytesLocalHeap="256M">
+    <cache name="StorageCache"
+           eternal="false"
+           timeToIdleSeconds="86400"
+           memoryStoreEvictionPolicy="LRU"
+            >
+        <persistence strategy="none"/>
+    </cache>
+    <cache name="ExceptionQueryCache"
+           eternal="false"
+           timeToIdleSeconds="86400"
+           memoryStoreEvictionPolicy="LRU"
+            >
+        <persistence strategy="none"/>
+    </cache>
+</ehcache>
diff --git a/kubernetes/example/config/kylin-more/ehcache.xml b/kubernetes/example/config/kylin-more/ehcache.xml
new file mode 100644
index 0000000..c9efc13
--- /dev/null
+++ b/kubernetes/example/config/kylin-more/ehcache.xml
@@ -0,0 +1,30 @@
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<ehcache maxBytesLocalHeap="2048M">
+    <cache name="StorageCache"
+           eternal="false"
+           timeToIdleSeconds="86400"
+           memoryStoreEvictionPolicy="LRU"
+            >
+        <persistence strategy="none"/>
+    </cache>
+    <cache name="ExceptionQueryCache"
+           eternal="false"
+           timeToIdleSeconds="86400"
+           memoryStoreEvictionPolicy="LRU"
+            >
+        <persistence strategy="none"/>
+    </cache>
+</ehcache>
diff --git a/kubernetes/example/config/kylin-more/kylinMetrics.xml b/kubernetes/example/config/kylin-more/kylinMetrics.xml
new file mode 100644
index 0000000..843fb91
--- /dev/null
+++ b/kubernetes/example/config/kylin-more/kylinMetrics.xml
@@ -0,0 +1,86 @@
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<beans xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xmlns="http://www.springframework.org/schema/beans"
+       xsi:schemaLocation="http://www.springframework.org/schema/beans
+            http://www.springframework.org/schema/beans/spring-beans-3.1.xsd">
+
+    <description>Kylin Metrics Related Configuration (SystemCube)</description>
+
+    <!-- A Reservoir which doesn't stage metrics messages at all, but emits them immediately. May be useful for debugging.-->
+    <bean id="instantReservoir" class="org.apache.kylin.metrics.lib.impl.InstantReservoir"/>
+
+    <!-- A Reservoir which stages metrics messages in memory, and emits them at a fixed rate. -->
+    <bean id="blockingReservoir" class="org.apache.kylin.metrics.lib.impl.BlockingReservoir">
+        <!-- minReportSize, the Reservoir will only try to write messages if the current count of staged messages exceeds minReportSize-->
+        <constructor-arg index="0">
+            <value>100</value>
+        </constructor-arg>
+
+        <!-- maxReportSize, max size of report in one time -->
+        <constructor-arg index="1">
+            <value>500</value>
+        </constructor-arg>
+
+        <!-- minReportTime, min duration (in minutes) between two report actions-->
+        <constructor-arg index="2">
+            <value>10</value>
+        </constructor-arg>
+    </bean>
+
+    <bean id="hiveSink" class="org.apache.kylin.metrics.lib.impl.hive.HiveSink"/>
+
+    <bean id="kafkaSink" class="org.apache.kylin.metrics.lib.impl.kafka.KafkaSink"/>
+
+    <bean id="initMetricsManager" class="org.springframework.beans.factory.config.MethodInvokingFactoryBean">
+        <property name="targetClass" value="org.apache.kylin.metrics.MetricsManager"/>
+        <property name="targetMethod" value="initMetricsManager"/>
+        <property name="arguments">
+            <list>
+                <ref bean="hiveSink"/>
+                <map key-type="org.apache.kylin.metrics.lib.ActiveReservoir" value-type="java.util.List">
+                    <!--
+                    <entry key-ref="instantReservoir">
+                        <list>
+                            <bean class="org.apache.kylin.common.util.Pair">
+                                <property name="first"
+                                          value="org.apache.kylin.metrics.lib.impl.kafka.KafkaReservoirReporter"/>
+                                <property name="second">
+                                    <props>
+                                        <prop key="bootstrap.servers">sandbox:9092</prop>
+                                    </props>
+                                </property>
+                            </bean>
+                        </list>
+                    </entry>
+                    -->
+                    <entry key-ref="blockingReservoir">
+                        <list>
+                            <bean class="org.apache.kylin.common.util.Pair">
+                                <property name="first"
+                                          value="org.apache.kylin.metrics.lib.impl.hive.HiveReservoirReporter"/>
+                                <property name="second">
+                                    <props>
+                                    </props>
+                                </property>
+                            </bean>
+                        </list>
+                    </entry>
+                </map>
+            </list>
+        </property>
+    </bean>
+
+</beans>
\ No newline at end of file
diff --git a/kubernetes/example/config/kylin-more/kylinSecurity.xml b/kubernetes/example/config/kylin-more/kylinSecurity.xml
new file mode 100644
index 0000000..6116433
--- /dev/null
+++ b/kubernetes/example/config/kylin-more/kylinSecurity.xml
@@ -0,0 +1,634 @@
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<beans xmlns="http://www.springframework.org/schema/beans"
+       xmlns:scr="http://www.springframework.org/schema/security"
+       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xmlns:context="http://www.springframework.org/schema/context"
+       xmlns:util="http://www.springframework.org/schema/util" xsi:schemaLocation="http://www.springframework.org/schema/beans
+	http://www.springframework.org/schema/beans/spring-beans-4.3.xsd
+	http://www.springframework.org/schema/security
+	http://www.springframework.org/schema/security/spring-security-4.2.xsd
+	http://www.springframework.org/schema/util
+	http://www.springframework.org/schema/util/spring-util-4.3.xsd
+    http://www.springframework.org/schema/context
+    http://www.springframework.org/schema/context/spring-context.xsd">
+
+    <scr:global-method-security pre-post-annotations="enabled">
+        <scr:expression-handler ref="expressionHandler"/>
+    </scr:global-method-security>
+
+
+    <!-- acl config -->
+    <bean id="aclPermissionFactory" class="org.apache.kylin.rest.security.AclPermissionFactory"/>
+
+    <bean id="expressionHandler"
+          class="org.springframework.security.access.expression.method.DefaultMethodSecurityExpressionHandler">
+        <property name="permissionEvaluator" ref="permissionEvaluator"/>
+    </bean>
+
+    <bean id="permissionEvaluator" class="org.apache.kylin.rest.security.KylinAclPermissionEvaluator">
+        <constructor-arg ref="aclService"/>
+        <constructor-arg ref="aclPermissionFactory"/>
+    </bean>
+
+    <bean id="aclAuthorizationStrategy"
+          class="org.springframework.security.acls.domain.AclAuthorizationStrategyImpl">
+        <constructor-arg>
+            <list>
+                <bean class="org.springframework.security.core.authority.SimpleGrantedAuthority">
+                    <constructor-arg value="ROLE_ADMIN"/>
+                </bean>
+                <bean class="org.springframework.security.core.authority.SimpleGrantedAuthority">
+                    <constructor-arg value="ROLE_ADMIN"/>
+                </bean>
+                <bean class="org.springframework.security.core.authority.SimpleGrantedAuthority">
+                    <constructor-arg value="ROLE_ADMIN"/>
+                </bean>
+            </list>
+        </constructor-arg>
+    </bean>
+
+    <bean id="auditLogger"
+          class="org.springframework.security.acls.domain.ConsoleAuditLogger"/>
+
+    <bean id="permissionGrantingStrategy"
+          class="org.springframework.security.acls.domain.DefaultPermissionGrantingStrategy">
+        <constructor-arg ref="auditLogger"/>
+    </bean>
+
+    <bean id="userService" class="org.apache.kylin.rest.service.KylinUserService"/>
+
+    <bean id="userGroupService" class="org.apache.kylin.rest.service.KylinUserGroupService"/>
+
+    <beans profile="ldap,saml">
+        <bean id="ldapSource"
+              class="org.springframework.security.ldap.DefaultSpringSecurityContextSource">
+            <constructor-arg value="${kylin.security.ldap.connection-server}"/>
+            <property name="userDn" value="${kylin.security.ldap.connection-username}"/>
+            <property name="password" value="${kylin.security.ldap.connection-password}"/>
+        </bean>
+
+        <bean id="kylinUserAuthProvider"
+              class="org.apache.kylin.rest.security.KylinAuthenticationProvider">
+            <constructor-arg>
+                <bean id="ldapUserAuthenticationProvider"
+                      class="org.springframework.security.ldap.authentication.LdapAuthenticationProvider">
+                    <constructor-arg>
+                        <bean class="org.springframework.security.ldap.authentication.BindAuthenticator">
+                            <constructor-arg ref="ldapSource"/>
+                            <property name="userSearch">
+                                <bean id="userSearch"
+                                      class="org.springframework.security.ldap.search.FilterBasedLdapUserSearch">
+                                    <constructor-arg index="0"
+                                                     value="${kylin.security.ldap.user-search-base}"/>
+                                    <constructor-arg index="1"
+                                                     value="${kylin.security.ldap.user-search-pattern}"/>
+                                    <constructor-arg index="2" ref="ldapSource"/>
+                                </bean>
+                            </property>
+                        </bean>
+                    </constructor-arg>
+                    <constructor-arg>
+                        <bean class="org.apache.kylin.rest.security.LDAPAuthoritiesPopulator">
+                            <constructor-arg index="0" ref="ldapSource"/>
+                            <constructor-arg index="1"
+                                             value="${kylin.security.ldap.user-group-search-base}"/>
+                            <constructor-arg index="2" value="${kylin.security.acl.admin-role}"/>
+                            <property name="groupSearchFilter" value="${kylin.security.ldap.user-group-search-filter}"/>
+                        </bean>
+                    </constructor-arg>
+                </bean>
+            </constructor-arg>
+        </bean>
+
+        <bean id="kylinServiceAccountAuthProvider"
+              class="org.apache.kylin.rest.security.KylinAuthenticationProvider">
+            <constructor-arg>
+                <bean id="ldapServiceAuthenticationProvider"
+                      class="org.springframework.security.ldap.authentication.LdapAuthenticationProvider">
+                    <constructor-arg>
+                        <bean class="org.springframework.security.ldap.authentication.BindAuthenticator">
+                            <constructor-arg ref="ldapSource"/>
+                            <property name="userSearch">
+                                <bean id="userSearch"
+                                      class="org.springframework.security.ldap.search.FilterBasedLdapUserSearch">
+                                    <constructor-arg index="0"
+                                                     value="${kylin.security.ldap.service-search-base}"/>
+                                    <constructor-arg index="1"
+                                                     value="${kylin.security.ldap.service-search-pattern}"/>
+                                    <constructor-arg index="2" ref="ldapSource"/>
+                                </bean>
+                            </property>
+                        </bean>
+                    </constructor-arg>
+                    <constructor-arg>
+                        <bean class="org.apache.kylin.rest.security.LDAPAuthoritiesPopulator">
+                            <constructor-arg index="0" ref="ldapSource"/>
+                            <constructor-arg index="1"
+                                             value="${kylin.security.ldap.service-group-search-base}"/>
+                            <constructor-arg index="2" value="${kylin.security.acl.admin-role}"/>
+                            <property name="groupSearchFilter" value="${kylin.security.ldap.user-group-search-filter}"/>
+                        </bean>
+                    </constructor-arg>
+                </bean>
+            </constructor-arg>
+        </bean>
+
+    </beans>
+
+    <beans profile="ldap">
+        <scr:authentication-manager alias="ldapAuthenticationManager">
+            <!-- do user ldap auth -->
+            <scr:authentication-provider ref="kylinUserAuthProvider"></scr:authentication-provider>
+
+            <!-- do service account ldap auth -->
+            <scr:authentication-provider
+                    ref="kylinServiceAccountAuthProvider"></scr:authentication-provider>
+        </scr:authentication-manager>
+
+    </beans>
+
+
+    <beans profile="testing">
+        <util:list id="adminAuthorities"
+                   value-type="org.springframework.security.core.authority.SimpleGrantedAuthority">
+            <value>ROLE_ADMIN</value>
+            <value>ROLE_MODELER</value>
+            <value>ROLE_ANALYST</value>
+        </util:list>
+        <util:list id="modelerAuthorities"
+                   value-type="org.springframework.security.core.authority.SimpleGrantedAuthority">
+            <value>ROLE_MODELER</value>
+            <value>ROLE_ANALYST</value>
+        </util:list>
+        <util:list id="analystAuthorities"
+                   value-type="org.springframework.security.core.authority.SimpleGrantedAuthority">
+            <value>ROLE_ANALYST</value>
+        </util:list>
+
+        <bean class="org.springframework.security.core.userdetails.User" id="adminUser">
+            <constructor-arg value="ADMIN"/>
+            <constructor-arg
+                    value="$2a$10$o3ktIWsGYxXNuUWQiYlZXOW5hWcqyNAFQsSSCSEWoC/BRVMAUjL32"/>
+            <constructor-arg ref="adminAuthorities"/>
+        </bean>
+        <bean class="org.springframework.security.core.userdetails.User" id="modelerUser">
+            <constructor-arg value="MODELER"/>
+            <constructor-arg
+                    value="$2a$10$Le5ernTeGNIARwMJsY0WaOLioNQdb0QD11DwjeyNqqNRp5NaDo2FG"/>
+            <constructor-arg ref="modelerAuthorities"/>
+        </bean>
+        <bean class="org.springframework.security.core.userdetails.User" id="analystUser">
+            <constructor-arg value="ANALYST"/>
+            <constructor-arg
+                    value="$2a$10$s4INO3XHjPP5Vm2xH027Ce9QeXWdrfq5pvzuGr9z/lQmHqi0rsbNi"/>
+            <constructor-arg ref="analystAuthorities"/>
+        </bean>
+
+        <bean id="kylinUserAuthProvider"
+              class="org.apache.kylin.rest.security.KylinAuthenticationProvider">
+            <constructor-arg>
+                <bean class="org.springframework.security.authentication.dao.DaoAuthenticationProvider">
+                    <property name="userDetailsService">
+                        <bean class="org.apache.kylin.rest.service.KylinUserService">
+                            <constructor-arg>
+                                <util:list
+                                        value-type="org.springframework.security.core.userdetails.User">
+                                    <ref bean="adminUser"></ref>
+                                    <ref bean="modelerUser"></ref>
+                                    <ref bean="analystUser"></ref>
+                                </util:list>
+                            </constructor-arg>
+                        </bean>
+                    </property>
+
+                    <property name="passwordEncoder" ref="passwordEncoder"></property>
+                </bean>
+            </constructor-arg>
+        </bean>
+
+        <!-- user auth -->
+        <bean id="passwordEncoder"
+              class="org.springframework.security.crypto.bcrypt.BCryptPasswordEncoder"/>
+
+        <scr:authentication-manager alias="testingAuthenticationManager">
+            <!-- do user ldap auth -->
+            <scr:authentication-provider ref="kylinUserAuthProvider"></scr:authentication-provider>
+        </scr:authentication-manager>
+    </beans>
+
+
+    <beans profile="testing,ldap">
+        <scr:http auto-config="true" use-expressions="true">
+            <scr:csrf disabled="true"/>
+            <scr:http-basic entry-point-ref="unauthorisedEntryPoint"/>
+
+            <scr:intercept-url pattern="/api/user/authentication*/**" access="permitAll"/>
+            <scr:intercept-url pattern="/api/query/runningQueries" access="hasRole('ROLE_ADMIN')"/>
+            <scr:intercept-url pattern="/api/query/*/stop" access="hasRole('ROLE_ADMIN')"/>
+            <scr:intercept-url pattern="/api/query*/**" access="isAuthenticated()"/>
+            <scr:intercept-url pattern="/api/metadata*/**" access="isAuthenticated()"/>
+            <scr:intercept-url pattern="/api/**/metrics" access="permitAll"/>
+            <scr:intercept-url pattern="/api/cache*/**" access="permitAll"/>
+            <scr:intercept-url pattern="/api/streaming_coordinator/**" access="permitAll"/>
+            <scr:intercept-url pattern="/api/service_discovery/state/is_active_job_node" access="permitAll"/>
+            <scr:intercept-url pattern="/api/cubes/src/tables" access="hasAnyRole('ROLE_ANALYST')"/>
+            <scr:intercept-url pattern="/api/cubes*/**" access="isAuthenticated()"/>
+            <scr:intercept-url pattern="/api/models*/**" access="isAuthenticated()"/>
+            <scr:intercept-url pattern="/api/streaming*/**" access="isAuthenticated()"/>
+            <scr:intercept-url pattern="/api/job*/**" access="isAuthenticated()"/>
+            <scr:intercept-url pattern="/api/admin/public_config" access="permitAll"/>
+            <scr:intercept-url pattern="/api/admin/version" access="permitAll"/>
+            <scr:intercept-url pattern="/api/projects" access="permitAll"/>
+            <scr:intercept-url pattern="/api/admin*/**" access="hasRole('ROLE_ADMIN')"/>
+            <scr:intercept-url pattern="/api/tables/**/snapshotLocalCache/**" access="permitAll"/>
+            <scr:intercept-url pattern="/api/**" access="isAuthenticated()"/>
+
+            <scr:form-login login-page="/login"/>
+            <scr:logout invalidate-session="true" delete-cookies="JSESSIONID" logout-url="/j_spring_security_logout"
+                        logout-success-url="/."/>
+            <scr:session-management session-fixation-protection="newSession"/>
+        </scr:http>
+    </beans>
+
+    <beans profile="saml">
+        <!-- Enable auto-wiring -->
+        <context:annotation-config/>
+
+        <!-- Scan for auto-wiring classes in spring saml packages -->
+        <context:component-scan base-package="org.springframework.security.saml"/>
+
+        <!-- Unsecured pages -->
+        <scr:http security="none" pattern="/image/**"/>
+        <scr:http security="none" pattern="/css/**"/>
+        <scr:http security="none" pattern="/less/**"/>
+        <scr:http security="none" pattern="/fonts/**"/>
+        <scr:http security="none" pattern="/js/**"/>
+        <scr:http security="none" pattern="/login/**"/>
+        <scr:http security="none" pattern="/routes.json"/>
+
+        <!-- Secured Rest API urls with LDAP basic authentication -->
+        <scr:http pattern="/api/**" use-expressions="true"
+                  authentication-manager-ref="apiAccessAuthenticationManager">
+            <scr:csrf disabled="true"/>
+            <scr:http-basic entry-point-ref="unauthorisedEntryPoint"/>
+
+            <scr:intercept-url pattern="/api/user/authentication*/**" access="permitAll"/>
+            <scr:intercept-url pattern="/api/query/runningQueries" access="hasRole('ROLE_ADMIN')"/>
+            <scr:intercept-url pattern="/api/query/*/stop" access="hasRole('ROLE_ADMIN')"/>
+            <scr:intercept-url pattern="/api/query*/**" access="isAuthenticated()"/>
+            <scr:intercept-url pattern="/api/metadata*/**" access="isAuthenticated()"/>
+            <scr:intercept-url pattern="/api/**/metrics" access="permitAll"/>
+            <scr:intercept-url pattern="/api/cache*/**" access="permitAll"/>
+            <scr:intercept-url pattern="/api/streaming_coordinator/**" access="permitAll"/>
+            <scr:intercept-url pattern="/api/cubes/src/tables" access="hasAnyRole('ROLE_ANALYST')"/>
+            <scr:intercept-url pattern="/api/cubes*/**" access="isAuthenticated()"/>
+            <scr:intercept-url pattern="/api/models*/**" access="isAuthenticated()"/>
+            <scr:intercept-url pattern="/api/streaming*/**" access="isAuthenticated()"/>
+            <scr:intercept-url pattern="/api/job*/**" access="isAuthenticated()"/>
+            <scr:intercept-url pattern="/api/admin/config" access="permitAll"/>
+            <scr:intercept-url pattern="/api/projects*/*" access="isAuthenticated()"/>
+            <scr:intercept-url pattern="/api/admin*/**" access="hasRole('ROLE_ADMIN')"/>
+            <scr:intercept-url pattern="/api/tables/**/snapshotLocalCache/**" access="permitAll"/>
+            <scr:intercept-url pattern="/api/**" access="isAuthenticated()"/>
+
+            <scr:form-login login-page="/login"/>
+            <scr:logout invalidate-session="true" delete-cookies="JSESSIONID" logout-url="/j_spring_security_logout"
+                        logout-success-url="/."/>
+            <scr:session-management session-fixation-protection="newSession"/>
+        </scr:http>
+
+        <!-- Secured non-api urls with SAML SSO -->
+        <scr:http auto-config="true" entry-point-ref="samlEntryPoint" use-expressions="false"
+                  authentication-manager-ref="webAccessAuthenticationManager">
+            <scr:csrf disabled="true"/>
+            <scr:intercept-url pattern="/**" access="IS_AUTHENTICATED_FULLY"/>
+            <scr:custom-filter before="FIRST" ref="metadataGeneratorFilter"/>
+            <scr:custom-filter after="BASIC_AUTH_FILTER" ref="samlFilter"/>
+        </scr:http>
+
+
+        <!-- API authentication manager -->
+        <scr:authentication-manager id="apiAccessAuthenticationManager">
+            <scr:authentication-provider ref="kylinServiceAccountAuthProvider"/>
+            <scr:authentication-provider ref="kylinUserAuthProvider"/>
+        </scr:authentication-manager>
+
+
+        <!-- Web authentication manager -->
+        <scr:authentication-manager id="webAccessAuthenticationManager">
+            <scr:authentication-provider ref="kylinSAMLAuthenticationProvider"/>
+        </scr:authentication-manager>
+
+        <!-- Central storage of cryptographic keys -->
+        <bean id="keyManager" class="org.springframework.security.saml.key.JKSKeyManager">
+            <constructor-arg value="${kylin.security.saml.keystore-file}"/>
+            <constructor-arg type="java.lang.String" value="changeit"/>
+            <constructor-arg>
+                <map>
+                    <entry key="kylin" value="changeit"/>
+                </map>
+            </constructor-arg>
+            <constructor-arg type="java.lang.String" value="kylin"/>
+        </bean>
+
+        <!-- Filters for processing of SAML messages -->
+        <bean id="samlFilter" class="org.springframework.security.web.FilterChainProxy">
+            <scr:filter-chain-map request-matcher="ant">
+                <scr:filter-chain pattern="/saml/login/**" filters="samlEntryPoint"/>
+                <scr:filter-chain pattern="/saml/logout/**" filters="samlLogoutFilter"/>
+                <scr:filter-chain pattern="/saml/metadata/**" filters="metadataDisplayFilter"/>
+                <scr:filter-chain pattern="/saml/SSO/**" filters="samlWebSSOProcessingFilter"/>
+                <scr:filter-chain pattern="/saml/SSOHoK/**"
+                                  filters="samlWebSSOHoKProcessingFilter"/>
+                <scr:filter-chain pattern="/saml/SingleLogout/**"
+                                  filters="samlLogoutProcessingFilter"/>
+            </scr:filter-chain-map>
+        </bean>
+
+        <!-- Handler deciding where to redirect user after successful login -->
+        <bean id="successRedirectHandler"
+              class="org.springframework.security.web.authentication.SavedRequestAwareAuthenticationSuccessHandler">
+            <property name="defaultTargetUrl" value="/models"/>
+        </bean>
+
+        <!-- Handler deciding where to redirect user after failed login -->
+        <bean id="failureRedirectHandler"
+              class="org.springframework.security.web.authentication.SimpleUrlAuthenticationFailureHandler">
+            <property name="useForward" value="true"/>
+            <property name="defaultFailureUrl" value="/login"/>
+        </bean>
+
+        <!-- Handler for successful logout -->
+        <bean id="successLogoutHandler"
+              class="org.springframework.security.web.authentication.logout.SimpleUrlLogoutSuccessHandler">
+        </bean>
+
+        <!-- Logger for SAML messages and events -->
+        <bean id="samlLogger" class="org.springframework.security.saml.log.SAMLDefaultLogger"/>
+
+        <!-- Filter automatically generates default SP metadata -->
+        <bean id="metadataGeneratorFilter"
+              class="org.springframework.security.saml.metadata.MetadataGeneratorFilter">
+            <constructor-arg>
+                <bean class="org.springframework.security.saml.metadata.MetadataGenerator">
+                    <property name="extendedMetadata">
+                        <bean class="org.springframework.security.saml.metadata.ExtendedMetadata">
+                            <property name="idpDiscoveryEnabled" value="false"/>
+                        </bean>
+                    </property>
+                    <property name="entityBaseURL"
+                              value="${kylin.security.saml.metadata-entity-base-url}"/>
+                </bean>
+            </constructor-arg>
+        </bean>
+
+        <!-- Entry point to initialize authentication, default values taken from properties file -->
+        <bean id="samlEntryPoint" class="org.springframework.security.saml.SAMLEntryPoint">
+            <property name="defaultProfileOptions">
+                <bean class="org.springframework.security.saml.websso.WebSSOProfileOptions">
+                    <property name="includeScoping" value="false"/>
+                </bean>
+            </property>
+        </bean>
+
+        <!-- The filter is waiting for connections on URL suffixed with filterSuffix and presents SP metadata there -->
+        <bean id="metadataDisplayFilter"
+              class="org.springframework.security.saml.metadata.MetadataDisplayFilter"/>
+
+        <!-- IDP Metadata configuration - paths to metadata of IDPs in circle of trust is here -->
+        <bean id="metadata"
+              class="org.springframework.security.saml.metadata.CachingMetadataManager">
+            <constructor-arg>
... 2976 lines suppressed ...


[kylin] 02/04: KYLIN-4447 Kylin on kubernetes in a production env

Posted by xx...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

xxyu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kylin.git

commit f78c2f1eaa8ba7c136b6f42bb1f7e07fbcd531be
Author: Julian Pan <li...@ebay.com>
AuthorDate: Sun Apr 26 13:07:29 2020 +0800

    KYLIN-4447 Kylin on kubernetes in a production env
---
 .../config/kylin/kylin-server-log4j.properties     |  30 +++++
 .../config/kylin/kylin-tools-log4j.properties      |  38 ++++++
 k8s/developments/kylin/kylin-all-statefulset.yaml  | 132 +++++++++++++++++++++
 k8s/developments/kylin/kylin-job-statefulset.yaml  | 123 +++++++++++++++++++
 .../kylin/kylin-query-statefulset.yaml             | 132 +++++++++++++++++++++
 .../kylin/kylin-receiver-statefulset.yaml          |  82 +++++++++++++
 k8s/developments/kylin/kylin-service.yaml          |  17 +++
 k8s/developments/memcached/memcached-service.yaml  |  12 ++
 .../memcached/memcached-statefulset.yaml           |  43 +++++++
 k8s/images/hadoop-client/Dockerfile                |  68 +++++++++++
 k8s/images/hadoop-client/README.MD                 |   5 +
 k8s/images/kylin/Dockerfile                        |  68 +++++++++++
 k8s/images/kylin/bin/bootstrap.sh                  |  20 ++++
 k8s/images/kylin/bin/check-liveness.sh             |  10 ++
 k8s/images/kylin/bin/check-readiness.sh            |   1 +
 k8s/images/kylin/bin/clean-log.sh                  |  23 ++++
 k8s/images/kylin/crontab.txt                       |   1 +
 kubernetes/example/config/filebeat/filebeat.yml    | 110 +++++++++++++++++
 18 files changed, 915 insertions(+)

diff --git a/k8s/developments/config/kylin/kylin-server-log4j.properties b/k8s/developments/config/kylin/kylin-server-log4j.properties
new file mode 100644
index 0000000..5dab102
--- /dev/null
+++ b/k8s/developments/config/kylin/kylin-server-log4j.properties
@@ -0,0 +1,30 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+#define appenders
+log4j.appender.file=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.file.layout=org.apache.log4j.PatternLayout
+log4j.appender.file.File=${catalina.home}/../logs/kylin.log
+log4j.appender.file.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}:%L : %m%n
+log4j.appender.file.Append=true
+
+#overall config
+log4j.rootLogger=INFO,file
+log4j.logger.org.apache.kylin=DEBUG
+log4j.logger.org.springframework=WARN
+log4j.logger.org.springframework.security=INFO
\ No newline at end of file
diff --git a/k8s/developments/config/kylin/kylin-tools-log4j.properties b/k8s/developments/config/kylin/kylin-tools-log4j.properties
new file mode 100644
index 0000000..54d18c2
--- /dev/null
+++ b/k8s/developments/config/kylin/kylin-tools-log4j.properties
@@ -0,0 +1,38 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# the kylin-tools-log4j.properties is mainly for configuring log properties on kylin tools, including:
+#   1. tools launched by kylin.sh script, e.g. DeployCoprocessorCLI
+#   2. DebugTomcat
+#   3. others
+#
+# It's called kylin-tools-log4j.properties so that it won't distract users from the other more important log4j config file: kylin-server-log4j.properties
+# enable this by -Dlog4j.configuration=kylin-tools-log4j.properties
+
+log4j.rootLogger=INFO,stderr
+
+log4j.appender.stderr=org.apache.log4j.ConsoleAppender
+log4j.appender.stderr.Target=System.err
+log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
+log4j.appender.stderr.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}:%L : %m%n
+
+#log4j.logger.org.apache.hadoop=ERROR
+log4j.logger.org.apache.kylin=DEBUG
+log4j.logger.org.springframework=WARN
+log4j.logger.org.apache.kylin.tool.shaded=INFO
\ No newline at end of file
diff --git a/k8s/developments/kylin/kylin-all-statefulset.yaml b/k8s/developments/kylin/kylin-all-statefulset.yaml
new file mode 100644
index 0000000..f97256e
--- /dev/null
+++ b/k8s/developments/kylin/kylin-all-statefulset.yaml
@@ -0,0 +1,132 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: kylin-all
+  namespace: kylin
+spec:
+  serviceName: kylin-svc
+  replicas: 1
+  selector:
+    matchExpressions:
+    - key: app
+      operator: In
+      values:
+      - kylin
+    - key: query
+      operator: In
+      values:
+      - "true"
+    - key: job
+      operator: In
+      values:
+      - "true"
+  template:
+    metadata:
+      labels:
+        app: kylin
+        query: "true"
+        job: "true"
+    spec:
+      containers:
+      - name: kylin
+        image: apachekylin/kylin:{version}
+        command:
+        - sh
+        - -c
+        args:
+        - cp $KYLIN_HOME/tomcat-config/* $KYLIN_HOME/tomcat/conf;
+          cp $KYLIN_HOME/sso-config/* $KYLIN_HOME/tomcat/webapps/kylin/WEB-INF/classes;
+          $TOOL_HOME/bootstrap.sh server -d;
+        ports:
+        - containerPort: 7070
+        - containerPort: 7443
+        livenessProbe:
+          exec:
+            command:
+            - sh
+            - -c
+            - $TOOL_HOME/check-liveness.sh
+          initialDelaySeconds: 120
+          periodSeconds: 60
+        readinessProbe:
+          exec:
+            command:
+            - sh
+            - -c
+            - $TOOL_HOME/check-readiness.sh
+          initialDelaySeconds: 120
+          periodSeconds: 60
+          failureThreshold: 30
+        volumeMounts:
+        - name: kylin-config
+          mountPath: /home/b_kylin/kylin2/conf
+        - name: tomcat-config
+          mountPath: /home/b_kylin/kylin2/tomcat-config
+        - name: kylin-logs
+          mountPath: /home/b_kylin/kylin2/logs
+        - name: tomcat-logs
+          mountPath: /home/b_kylin/kylin2/tomcat/logs
+        - name: sso-config
+          mountPath: /home/b_kylin/kylin2/sso-config
+        resources:
+          requests:
+            memory: 124Gi
+            cpu: 24
+          limits:
+            memory: 124Gi
+            cpu: 32
+      - name: filebeat
+        image: docker.elastic.co/beats/filebeat:6.4.3
+        args:
+        - -c
+        - /usr/share/filebeat/config/filebeat.yml
+        - -e
+        volumeMounts:
+        - name: kylin-logs
+          mountPath: /var/log/kylin
+        - name: tomcat-logs
+          mountPath: /var/log/tomcat
+        - name: filebeat-config
+          mountPath: /usr/share/filebeat/config
+          readOnly: true
+        resources:
+          requests:
+            memory: 4Gi
+            cpu: 2
+          limits:
+            memory: 4Gi
+            cpu: 2
+      volumes:
+      - name: kylin-logs
+        emptyDir:
+          sizeLimit: 30Gi
+      - name: tomcat-logs
+        emptyDir:
+          sizeLimit: 2Gi
+      - name: kylin-config
+        secret:
+          secretName: kylin-config
+          items:
+          - key: kylin.properties
+            path: kylin.properties
+          - key: kylin-tools-log4j.properties
+            path: kylin-tools-log4j.properties
+          - key: kylin_hive_conf.xml
+            path: kylin_hive_conf.xml
+          - key: kylin_job_conf_inmem.xml
+            path: kylin_job_conf_inmem.xml
+          - key: kylin-server-log4j.properties
+            path: kylin-server-log4j.properties
+          - key: kylin_job_conf.xml
+            path: kylin_job_conf.xml
+          - key: setenv.sh
+            path: setenv.sh
+      - name: tomcat-config
+        secret:
+          secretName: tomcat-config
+      - name: sso-config
+        secret:
+          secretName: sso-config
+      - name: filebeat-config
+        secret:
+          secretName: filebeat-config
\ No newline at end of file
diff --git a/k8s/developments/kylin/kylin-job-statefulset.yaml b/k8s/developments/kylin/kylin-job-statefulset.yaml
new file mode 100644
index 0000000..df95084
--- /dev/null
+++ b/k8s/developments/kylin/kylin-job-statefulset.yaml
@@ -0,0 +1,123 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: kylin-job
+  namespace: kylin
+spec:
+  serviceName: kylin-svc
+  replicas: 1
+  selector:
+    matchExpressions:
+    - key: app
+      operator: In
+      values:
+      - kylin
+    - key: query
+      operator: In
+      values:
+      - "false"
+    - key: job
+      operator: In
+      values:
+      - "true"
+  template:
+    metadata:
+      labels:
+        app: kylin
+        query: "false"
+        job: "true"
+    spec:
+      containers:
+      - name: kylin
+        image: apachekylin/kylin:{version}
+        command:
+        - sh
+        - -c
+        args:
+        - cp $KYLIN_HOME/tomcat-config/* $KYLIN_HOME/tomcat/conf;
+          cp $KYLIN_HOME/sso-config/* $KYLIN_HOME/tomcat/webapps/kylin/WEB-INF/classes;
+          $TOOL_HOME/bootstrap.sh server -d;
+        ports:
+        - containerPort: 7070
+        - containerPort: 7443
+        livenessProbe:
+          exec:
+            command:
+            - sh
+            - -c
+            - $TOOL_HOME/check-liveness.sh
+          initialDelaySeconds: 120
+          periodSeconds: 60
+        volumeMounts:
+        - name: kylin-config
+          mountPath: /home/b_kylin/kylin2/conf
+        - name: tomcat-config
+          mountPath: /home/b_kylin/kylin2/tomcat-config
+        - name: kylin-logs
+          mountPath: /home/b_kylin/kylin2/logs
+        - name: tomcat-logs
+          mountPath: /home/b_kylin/kylin2/tomcat/logs
+        - name: sso-config
+          mountPath: /home/b_kylin/kylin2/sso-config
+        resources:
+          requests:
+            memory: 124Gi
+            cpu: 24
+          limits:
+            memory: 124Gi
+            cpu: 32
+      - name: filebeat
+        image: docker.elastic.co/beats/filebeat:6.4.3
+        args:
+        - -c
+        - /usr/share/filebeat/config/filebeat.yml
+        - -e
+        volumeMounts:
+        - name: kylin-logs
+          mountPath: /var/log/kylin
+        - name: tomcat-logs
+          mountPath: /var/log/tomcat
+        - name: filebeat-config
+          mountPath: /usr/share/filebeat/config
+          readOnly: true
+        resources:
+          requests:
+            memory: 4Gi
+            cpu: 2
+          limits:
+            memory: 4Gi
+            cpu: 2
+      volumes:
+      - name: kylin-logs
+        emptyDir:
+          sizeLimit: 30Gi
+      - name: tomcat-logs
+        emptyDir:
+          sizeLimit: 2Gi
+      - name: kylin-config
+        secret:
+          secretName: kylin-config
+          items:
+          - key: kylin-job.properties
+            path: kylin.properties
+          - key: kylin-tools-log4j.properties
+            path: kylin-tools-log4j.properties
+          - key: kylin_hive_conf.xml
+            path: kylin_hive_conf.xml
+          - key: kylin_job_conf_inmem.xml
+            path: kylin_job_conf_inmem.xml
+          - key: kylin-server-log4j.properties
+            path: kylin-server-log4j.properties
+          - key: kylin_job_conf.xml
+            path: kylin_job_conf.xml
+          - key: setenv.sh
+            path: setenv.sh
+      - name: tomcat-config
+        secret:
+          secretName: tomcat-config
+      - name: sso-config
+        secret:
+          secretName: sso-config
+      - name: filebeat-config
+        secret:
+          secretName: filebeat-config
\ No newline at end of file
diff --git a/k8s/developments/kylin/kylin-query-statefulset.yaml b/k8s/developments/kylin/kylin-query-statefulset.yaml
new file mode 100644
index 0000000..fa1f3c0
--- /dev/null
+++ b/k8s/developments/kylin/kylin-query-statefulset.yaml
@@ -0,0 +1,132 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: kylin-query
+  namespace: kylin
+spec:
+  serviceName: kylin-svc
+  replicas: 1
+  selector:
+    matchExpressions:
+    - key: app
+      operator: In
+      values:
+      - kylin
+    - key: query
+      operator: In
+      values:
+      - "true"
+    - key: job
+      operator: In
+      values:
+      - "false"
+  template:
+    metadata:
+      labels:
+        app: kylin
+        query: "true"
+        job: "false"
+    spec:
+      containers:
+      - name: kylin
+        image: apachekylin/kylin:{version}
+        command:
+        - sh
+        - -c
+        args:
+        - cp $KYLIN_HOME/tomcat-config/* $KYLIN_HOME/tomcat/conf;
+          cp $KYLIN_HOME/sso-config/* $KYLIN_HOME/tomcat/webapps/kylin/WEB-INF/classes;
+          $TOOL_HOME/bootstrap.sh server -d;
+        ports:
+        - containerPort: 7070
+        - containerPort: 7443
+        livenessProbe:
+          exec:
+            command:
+            - sh
+            - -c
+            - $TOOL_HOME/check-liveness.sh
+          initialDelaySeconds: 120
+          periodSeconds: 60
+        readinessProbe:
+          exec:
+            command:
+            - sh
+            - -c
+            - $TOOL_HOME/check-readiness.sh
+          initialDelaySeconds: 120
+          periodSeconds: 60
+          failureThreshold: 30
+        volumeMounts:
+        - name: kylin-config
+          mountPath: /home/b_kylin/kylin2/conf
+        - name: tomcat-config
+          mountPath: /home/b_kylin/kylin2/tomcat-config
+        - name: kylin-logs
+          mountPath: /home/b_kylin/kylin2/logs
+        - name: tomcat-logs
+          mountPath: /home/b_kylin/kylin2/tomcat/logs
+        - name: sso-config
+          mountPath: /home/b_kylin/kylin2/sso-config
+        resources:
+          requests:
+            memory: 124Gi
+            cpu: 24
+          limits:
+            memory: 124Gi
+            cpu: 32
+      - name: filebeat
+        image: docker.elastic.co/beats/filebeat:6.4.3
+        args:
+        - -c
+        - /usr/share/filebeat/config/filebeat.yml
+        - -e
+        volumeMounts:
+        - name: kylin-logs
+          mountPath: /var/log/kylin
+        - name: tomcat-logs
+          mountPath: /var/log/tomcat
+        - name: filebeat-config
+          mountPath: /usr/share/filebeat/config
+          readOnly: true
+        resources:
+          requests:
+            memory: 4Gi
+            cpu: 2
+          limits:
+            memory: 4Gi
+            cpu: 2
+      volumes:
+      - name: kylin-logs
+        emptyDir:
+          sizeLimit: 30Gi
+      - name: tomcat-logs
+        emptyDir:
+          sizeLimit: 2Gi
+      - name: kylin-config
+        secret:
+          secretName: kylin-config
+          items:
+          - key: kylin-query.properties
+            path: kylin.properties
+          - key: kylin-tools-log4j.properties
+            path: kylin-tools-log4j.properties
+          - key: kylin_hive_conf.xml
+            path: kylin_hive_conf.xml
+          - key: kylin_job_conf_inmem.xml
+            path: kylin_job_conf_inmem.xml
+          - key: kylin-server-log4j.properties
+            path: kylin-server-log4j.properties
+          - key: kylin_job_conf.xml
+            path: kylin_job_conf.xml
+          - key: setenv.sh
+            path: setenv.sh
+      - name: tomcat-config
+        secret:
+          secretName: tomcat-config
+      - name: sso-config
+        secret:
+          secretName: sso-config
+      - name: filebeat-config
+        secret:
+          secretName: filebeat-config
\ No newline at end of file
diff --git a/k8s/developments/kylin/kylin-receiver-statefulset.yaml b/k8s/developments/kylin/kylin-receiver-statefulset.yaml
new file mode 100644
index 0000000..4c35b45
--- /dev/null
+++ b/k8s/developments/kylin/kylin-receiver-statefulset.yaml
@@ -0,0 +1,82 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: kylin-receiver
+  namespace: kylin
+spec:
+  serviceName: receiver-svc
+  replicas: 10
+  selector:
+    matchExpressions:
+    - key: app
+      operator: In
+      values:
+        - kylin-receiver
+  template:
+    metadata:
+      labels:
+        app: kylin-receiver
+    spec:
+      securityContext:
+        fsGroup: 996
+      containers:
+      - name: kylin
+        image: apachekylin/kylin:{version}
+        command:
+        - sh
+        - -c
+        args:
+        - $TOOL_HOME/bootstrap.sh streaming -d
+        ports:
+        - containerPort: 9090
+        livenessProbe:
+          httpGet:
+            path: /kylin/api/stats/healthCheck
+            port: 9090
+          initialDelaySeconds: 120
+          periodSeconds: 60
+        volumeMounts:
+        - name: kylin-config
+          mountPath: /home/b_kylin/kylin2/conf
+        - name: receiver-volume
+          mountPath: /home/b_kylin/kylin2/stream_index
+        resources:
+          requests:
+            memory: 86Gi
+            cpu: 16
+          limits:
+            memory: 86Gi
+            cpu: 16
+      volumes:
+      - name: kylin-config
+        secret:
+          secretName: kylin-config
+          items:
+          - key: kylin-receiver.properties
+            path: kylin.properties
+          - key: kylin-tools-log4j.properties
+            path: kylin-tools-log4j.properties
+          - key: kylin_hive_conf.xml
+            path: kylin_hive_conf.xml
+          - key: kylin_job_conf_inmem.xml
+            path: kylin_job_conf_inmem.xml
+          - key: kylin-server-log4j.properties
+            path: kylin-server-log4j.properties
+          - key: kylin_job_conf.xml
+            path: kylin_job_conf.xml
+          - key: kylin-receiver-setenv.sh
+            path: setenv.sh
+          - key: mystore.jks
+            path: mystore.jks
+          - key: truststore.jks
+            path: truststore.jks
+  volumeClaimTemplates:
+  - metadata:
+      name: receiver-volume
+    spec:
+      accessModes: 
+        - ReadWriteOnce
+      resources:
+        requests:
+          storage: 400Gi
+      storageClassName: local-dynamic
\ No newline at end of file
diff --git a/k8s/developments/kylin/kylin-service.yaml b/k8s/developments/kylin/kylin-service.yaml
new file mode 100644
index 0000000..2a0a9f8
--- /dev/null
+++ b/k8s/developments/kylin/kylin-service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: kylin-svc
+  namespace: kylin
+spec:
+  ports:
+  - name: http
+    port: 80
+    targetPort: 7070
+  - name: https
+    port: 443
+    targetPort: 7443
+  selector:
+    app: kylin
+    query: "true"
+  type: LoadBalancer
\ No newline at end of file
diff --git a/k8s/developments/memcached/memcached-service.yaml b/k8s/developments/memcached/memcached-service.yaml
new file mode 100644
index 0000000..3f952a3
--- /dev/null
+++ b/k8s/developments/memcached/memcached-service.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: cache-svc
+  namespace: kylin
+spec:
+  clusterIP: None
+  selector: 
+    app: kylin-memcached
+  ports:
+  - port: 11211
+    targetPort: 11211
\ No newline at end of file
diff --git a/k8s/developments/memcached/memcached-statefulset.yaml b/k8s/developments/memcached/memcached-statefulset.yaml
new file mode 100644
index 0000000..38e0b0d
--- /dev/null
+++ b/k8s/developments/memcached/memcached-statefulset.yaml
@@ -0,0 +1,43 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: kylin-memcached
+  namespace: kylin
+spec:
+  serviceName: cache-svc
+  replicas: 3
+  selector:
+    matchExpressions:
+    - key: app
+      operator: In
+      values:
+      - kylin-memcached
+  template:
+    metadata:
+      labels:
+        app: kylin-memcached
+    spec:
+      containers:
+      - image: memcached:1.4.39
+        name: memcached
+        ports:
+        - containerPort: 11211
+        args:
+        - "-m 20480"
+        resources:
+          limits:
+            cpu: 4
+            memory: 24Gi
+          requests:
+            cpu: 4
+            memory: 24Gi
+        livenessProbe:
+          tcpSocket:
+            port: 11211
+          initialDelaySeconds: 30
+          timeoutSeconds: 5
+        readinessProbe:
+          tcpSocket:
+            port: 11211
+          initialDelaySeconds: 5
+          timeoutSeconds: 1
\ No newline at end of file
diff --git a/k8s/images/hadoop-client/Dockerfile b/k8s/images/hadoop-client/Dockerfile
new file mode 100644
index 0000000..d45f1b9
--- /dev/null
+++ b/k8s/images/hadoop-client/Dockerfile
@@ -0,0 +1,68 @@
+FROM centos:7.3.1611
+
+MAINTAINER Apache Kylin
+
+WORKDIR /tmp
+
+# install jdk and other commands
+RUN set -x \
+    && yum install -y which \
+       java-1.8.0-openjdk \
+       java-1.8.0-openjdk-devel \
+       krb5-workstation \
+    && yum clean all
+
+
+# version variables
+ARG HADOOP_VERSION=2.7.4
+ARG HIVE_VERSION=1.2.2
+ARG HBASE_VERSION=1.3.1
+ARG SPARK_VERSION=2.2.1
+ARG ZK_VERSION=3.4.6
+
+ARG APACHE_HOME=/apache
+
+RUN set -x \
+    && mkdir -p $APACHE_HOME
+
+ENV JAVA_HOME /etc/alternatives/jre
+
+# install hadoop
+ENV HADOOP_HOME=$APACHE_HOME/hadoop
+ADD hdp-clients/hadoop-${HADOOP_VERSION}.tar.gz $APACHE_HOME
+RUN set -x \
+    && ln -s $APACHE_HOME/hadoop-${HADOOP_VERSION} $HADOOP_HOME
+
+#install hive
+ENV HIVE_HOME=$APACHE_HOME/hive 
+ADD hdp-clients/apache-hive-${HIVE_VERSION}.tar.gz $APACHE_HOME
+RUN set -x \
+    && ln -s $APACHE_HOME/apache-hive-${HIVE_VERSION} $HIVE_HOME
+
+#install hbase
+ENV HBASE_HOME=$APACHE_HOME/hbase
+ADD hdp-clients/hbase-${HBASE_VERSION}.tar.gz $APACHE_HOME
+RUN set -x \
+    && ln -s $APACHE_HOME/hbase-${HBASE_VERSION} $HBASE_HOME
+
+#install spark
+ENV SPARK_HOME=$APACHE_HOME/spark
+ADD hdp-clients/spark-${SPARK_VERSION}.tar.gz $APACHE_HOME
+RUN set -x \
+    && ln -s $APACHE_HOME/spark-${SPARK_VERSION} $SPARK_HOME
+
+#install zk
+ENV ZK_HOME=$APACHE_HOME/zookeeper
+ADD hdp-clients/zookeeper-${ZK_VERSION}.tar.gz $APACHE_HOME
+RUN set -x \
+    && ln -s $APACHE_HOME/zookeeper-${ZK_VERSION} $ZK_HOME
+
+ENV PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HIVE_HOME/bin:$HBASE_HOME/bin:$ZK_HOME/bin
+
+COPY conf/hadoop $HADOOP_HOME/etc/hadoop
+COPY conf/hbase $HBASE_HOME/conf
+COPY conf/hive $HIVE_HOME/conf
+COPY conf/krb5.conf /etc/krb5.conf
+
+# Cleanup
+RUN rm -rf /tmp/*
diff --git a/k8s/images/hadoop-client/README.MD b/k8s/images/hadoop-client/README.MD
new file mode 100644
index 0000000..6cb56e8
--- /dev/null
+++ b/k8s/images/hadoop-client/README.MD
@@ -0,0 +1,5 @@
+Build Step
+1. Please download/copy your hadoop client to hadoop-client folder, which includes hadoop, hive, hbase, spark and zookeeper.
+2. Please copy your hadoop config to config folder, which includes hadoop, hive, hbase and krb5.
+3. Update related version variables in Dockerfile.
+4. Run docker build -t "apachekylin/kylin-hdp-client:${VERSION}"
\ No newline at end of file
diff --git a/k8s/images/kylin/Dockerfile b/k8s/images/kylin/Dockerfile
new file mode 100644
index 0000000..03bb76a
--- /dev/null
+++ b/k8s/images/kylin/Dockerfile
@@ -0,0 +1,68 @@
+ARG HADOOP_CLIENT_VERSION=3.0.0
+
+FROM apachekylin/hadoop-client:$HADOOP_CLIENT_VERSION
+
+MAINTAINER Apache Kylin
+
+WORKDIR /tmp
+
+# install system tools
+RUN set -x \
+    && yum install -y openssh-clients \
+       cronie \
+       unzip \
+       sudo \
+       net-tools \
+       iftop \
+       tcpdump \
+       perf \
+       telnet \
+       bind-utils \
+    && yum clean all
+
+ARG KYLIN_VERSION=3.0.0
+ARG USER=apache_kylin
+
+ARG USER_HOME=/home/$USER
+RUN set -x \
+    && groupadd -r $USER \
+    && useradd -r -m -g $USER $USER -d $USER_HOME \
+    && echo '$USER ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
+
+ENV KYLIN_HOME=$USER_HOME/kylin
+
+
+COPY --chown=$USER:$USER apache-kylin-${KYLIN_VERSION}-bin $KYLIN_HOME
+
+ARG HADOOP_CONF_HOME=/apache/hadoop/etc/hadoop
+ARG HIVE_CONF_HOME=/apache/hive/conf
+ARG HBASE_CONF_HOME=/apache/hbase/conf
+ARG SPARK_CONF_HOME=$KYLIN_HOME/hadoop-conf
+
+RUN set -x \
+    && unzip -qq $KYLIN_HOME/tomcat/webapps/kylin.war -d $KYLIN_HOME/tomcat/webapps/kylin \
+    && chown -R $USER:$USER $KYLIN_HOME/tomcat/webapps/kylin \
+    && rm $KYLIN_HOME/tomcat/webapps/kylin.war \
+    && mkdir $SPARK_CONF_HOME \
+    && ln -s $HADOOP_CONF_HOME/core-site.xml $SPARK_CONF_HOME/core-site.xml \
+    && ln -s $HADOOP_CONF_HOME/hdfs-site.xml $SPARK_CONF_HOME/hdfs-site.xml \
+    && ln -s $HADOOP_CONF_HOME/yarn-site.xml $SPARK_CONF_HOME/yarn-site.xml \
+    && ln -s $HADOOP_CONF_HOME/hdfs-variable.xml $SPARK_CONF_HOME/hdfs-variable.xml\
+    && ln -s $HADOOP_CONF_HOME/yarn-variable.xml $SPARK_CONF_HOME/yarn-variable.xml\
+    && ln -s $HADOOP_CONF_HOME/federation-mapping.xml $SPARK_CONF_HOME/federation-mapping.xml\
+    && ln -s $HIVE_CONF_HOME/hive-site.xml $SPARK_CONF_HOME/hive-site.xml \
+    && ln -s $HBASE_CONF_HOME/hbase-site.xml $SPARK_CONF_HOME/hbase-site.xml \
+    && chown -R $USER:$USER $SPARK_CONF_HOME
+
+ENV TOOL_HOME=$USER_HOME/bin
+COPY bin $TOOL_HOME
+COPY crontab.txt /tmp/crontab.txt
+
+RUN /usr/bin/crontab -u $USER /tmp/crontab.txt \
+    && rm -rf /tmp/* \
+    && chmod 755 $TOOL_HOME/*
+EXPOSE 7070
+
+USER $USER
+
+CMD ["sh", "-c", "$TOOL_HOME/bootstrap.sh server -d"]
diff --git a/k8s/images/kylin/bin/bootstrap.sh b/k8s/images/kylin/bin/bootstrap.sh
new file mode 100755
index 0000000..bbce0b9
--- /dev/null
+++ b/k8s/images/kylin/bin/bootstrap.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+# enable kite
+/home/b_kylin/kite2/b_kylin/bin/run.sh
+
+# enable cron job
+sudo crond -i -p
+
+#sleep 60 second due to kite enable need more time
+sleep 60
+
+if [[ $1 == "server" ]]; then
+  $KYLIN_HOME/bin/kylin.sh start
+elif [[ $1 == "streaming" ]]; then
+  $KYLIN_HOME/bin/kylin.sh streaming start
+fi
+
+if [[ $2 == "-d" ]]; then
+  while true; do sleep 1000; done
+fi
diff --git a/k8s/images/kylin/bin/check-liveness.sh b/k8s/images/kylin/bin/check-liveness.sh
new file mode 100644
index 0000000..6aa0df1
--- /dev/null
+++ b/k8s/images/kylin/bin/check-liveness.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+result="$(curl --write-out %{http_code} --silent --output /dev/null http://127.0.0.1:7070/kylin/)"
+if [ $result == 200 ]
+then
+  echo "check http get successful"
+  exit 0
+else
+  echo "check http get failed"
+  exit 1
+fi
diff --git a/k8s/images/kylin/bin/check-readiness.sh b/k8s/images/kylin/bin/check-readiness.sh
new file mode 100644
index 0000000..a9bf588
--- /dev/null
+++ b/k8s/images/kylin/bin/check-readiness.sh
@@ -0,0 +1 @@
+#!/bin/bash
diff --git a/k8s/images/kylin/bin/clean-log.sh b/k8s/images/kylin/bin/clean-log.sh
new file mode 100644
index 0000000..5b76d68
--- /dev/null
+++ b/k8s/images/kylin/bin/clean-log.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+export KYLIN_HOME=/home/b_kylin/kylin2
+
+# Rotate kylin out
+timestamp=`date +%Y-%m-%d`
+mv ${KYLIN_HOME}/logs/kylin.out ${KYLIN_HOME}/logs/kylin.out.$timestamp
+mv /tmp/cron_b_kylin.out /tmp/cron_b_kylin.out.$timestamp
+
+# Delete kylin log before 3 days
+find ${KYLIN_HOME}/logs  -mtime +2 -type f -delete
+
+# Delete kylin tomcat log before 3 days
+find ${KYLIN_HOME}/tomcat/logs -mtime +2 -type f -delete
+
+# Delete keytab log before 3 days
+find /tmp/ -name 'cron_b_kylin.out.*' -mtime +2 -type f -delete
+
+# Delete hive log and temp files
+find /tmp/ -name '*_resources' -mtime +1 -type d -exec rm -rf {} +
+find /tmp/ -name 'hadoop-unjar*' -mtime +1 -type d -exec rm -rf {} +
+find /tmp/b_kylin/ -mtime +1 -type f -delete
+find /tmp/b_kylin/ -mtime +1 -type d -exec rm -rf {} +
\ No newline at end of file
diff --git a/k8s/images/kylin/crontab.txt b/k8s/images/kylin/crontab.txt
new file mode 100644
index 0000000..0fa9cec
--- /dev/null
+++ b/k8s/images/kylin/crontab.txt
@@ -0,0 +1 @@
+0 0 * * * /home/b_kylin/bin/clean-log.sh
diff --git a/kubernetes/example/config/filebeat/filebeat.yml b/kubernetes/example/config/filebeat/filebeat.yml
new file mode 100644
index 0000000..c78c7e6
--- /dev/null
+++ b/kubernetes/example/config/filebeat/filebeat.yml
@@ -0,0 +1,110 @@
+###################### Filebeat Configuration Example #########################
+
+# This file is an example configuration file highlighting only the most common
+# options. The filebeat.reference.yml file from the same directory contains all the
+# supported options with more comments. You can use it as a reference.
+#
+# You can find the full configuration reference here:
+# https://www.elastic.co/guide/en/beats/filebeat/index.html
+
+# For more available modules and options, please see the filebeat.reference.yml sample
+# configuration file.
+
+#=========================== Filebeat prospectors =============================
+
+filebeat.prospectors:
+
+# kylin log #
+- type: log
+  enabled: true
+  paths:
+    - /var/log/kylin/kylin.log
+  multiline.pattern: '^\d{4}-([1][0-2]|[0][0-9])-([3][0-1]|[1-2][0-9]|[0][1-9]|[1-9]) ([2][0-3]|[0-1][0-9]|[1-9]):[0-5][0-9]:([0-5][0-9]|[6][0])'
+  multiline.negate: true
+  multiline.match: after
+  fields:
+    name: "service"
+    pipeline: "service"
+
+# kylin out#
+- type: log
+  enabled: true
+  paths:
+    - /var/log/kylin/kylin.gc.*.current
+  multiline.pattern: '^\d{4}-(?:0?[1-9]|1[0-2])-(?:(?:0[1-9])|(?:[12][0-9])|(?:3[01])|[1-9])T(?:2[0123]|[01]?[0-9]):(?:[0-5][0-9]):(?:(?:[0-5]?[0-9]|60)(?:[:.,][0-9]+)?)(?:Z|[+-](?:2[0123]|[01]?[0-9])(?::?(?:[0-5][0-9])))'
+  multiline.negate: true
+  multiline.match: after
+  fields:
+    name: "gc"
+    pipeline: "gc"
+
+# kylin out#
+- type: log
+  enabled: true
+  paths:
+    - /var/log/kylin/kylin.out
+  multiline.pattern: '^\b(?:Jan?|Feb?|Mar?|Apr?|May?|Jun?|Jul?|Aug?|Sep?|Oct?|Nov?|Dec?)\b (?:(?:0[1-9])|(?:[12][0-9])|(?:3[01])|[1-9]), \d{4} (?:2[0123]|[01]?[0-9]):(?:[0-5][0-9]):(?:(?:[0-5]?[0-9]|60)(?:[:.,][0-9]+)?) (?:AM|PM)'
+  multiline.negate: true
+  multiline.match: after
+  fields:
+    name: "out"
+    pipeline: "catalina"
+
+# access log #
+- type: log
+  paths:
+    - /var/log/tomcat/localhost_access_log.txt
+  fields:
+    name: "access"
+    pipeline: "access"
+
+# catalina log #
+- type: log
+  paths:
+    - /var/log/tomcat/catalina.*.log
+  multiline.pattern: '^\b(?:Jan?|Feb?|Mar?|Apr?|May?|Jun?|Jul?|Aug?|Sep?|Oct?|Nov?|Dec?)\b (?:(?:0[1-9])|(?:[12][0-9])|(?:3[01])|[1-9]), \d{4} (?:2[0123]|[01]?[0-9]):(?:[0-5][0-9]):(?:(?:[0-5]?[0-9]|60)(?:[:.,][0-9]+)?) (?:AM|PM)'
+  multiline.negate: true
+  multiline.match: after
+  fields:
+    name: "catalina"
+    pipeline: "catalina"
+
+# localhost log #
+- type: log
+  paths:
+    - /var/log/tomcat/localhost.*.log
+  multiline.pattern: '^\b(?:Jan?|Feb?|Mar?|Apr?|May?|Jun?|Jul?|Aug?|Sep?|Oct?|Nov?|Dec?)\b (?:(?:0[1-9])|(?:[12][0-9])|(?:3[01])|[1-9]), \d{4} (?:2[0123]|[01]?[0-9]):(?:[0-5][0-9]):(?:(?:[0-5]?[0-9]|60)(?:[:.,][0-9]+)?) (?:AM|PM)'
+  multiline.negate: true
+  multiline.match: after
+  fields:
+    name: "localhost"
+    pipeline: "catalina"
+
+
+#============================= Filebeat modules ===============================
+
+filebeat.config.modules:
+  path: ${path.config}/modules.d/*.yml
+  reload.enabled: false
+
+
+#==================== Elasticsearch template setting ==========================
+
+setup.template:
+  enabled: false
+
+#============================== Kibana =====================================
+setup.kibana:
+
+#================================ Outputs =====================================
+
+# Configure what output to use when sending the data collected by the beat.
+
+#-------------------------- Elasticsearch output ------------------------------
+output.elasticsearch:
+  # Array of hosts to connect to.
+  hosts: []
+  protocol: "https"
+  username: ""
+  password: ""
+  index: "kylin-%{[fields.name]}-%{+yyyy-MM-dd}"
\ No newline at end of file


[kylin] 04/04: KYLIN-4447 Upload a complete example for CDH5.7 env(2 job + 2 query)

Posted by xx...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

xxyu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kylin.git

commit 50c374be7d02ab3e29b24cf7b16b0d1b6047d9b7
Author: XiaoxiangYu <hi...@126.com>
AuthorDate: Sun Apr 26 20:05:26 2020 +0800

    KYLIN-4447 Upload a complete example for CDH5.7 env(2 job + 2 query)
---
 .gitignore                                         |   3 -
 kubernetes/README                                  |   6 -
 kubernetes/README.md                               |  37 ++
 kubernetes/config/README                           |   1 -
 kubernetes/config/README.md                        |   3 +
 kubernetes/config/production/filebeat/filebeat.yml |  19 +-
 kubernetes/config/production/hadoop/core-site.xml  |  15 +
 kubernetes/config/production/hadoop/hbase-site.xml |  15 +
 kubernetes/config/production/hadoop/hdfs-site.xml  |  15 +
 kubernetes/config/production/hadoop/hive-site.xml  |  15 +
 .../config/production/hadoop/mapred-site.xml       |  15 +
 kubernetes/config/production/hadoop/yarn-site.xml  |  15 +
 .../production/kylin-all/kylin-kafka-consumer.xml  |  31 --
 .../kylin-all/kylin-spark-log4j.properties         |  43 ---
 .../kylin-all/kylin-tools-log4j.properties         |  38 --
 .../config/production/kylin-all/kylin.properties   | 419 ---------------------
 .../production/kylin-all/kylin_hive_conf.xml       | 102 -----
 .../config/production/kylin-all/kylin_job_conf.xml |  88 -----
 .../kylin-all/kylin_job_conf_cube_merge.xml        | 104 -----
 .../production/kylin-all/kylin_job_conf_inmem.xml  | 111 ------
 .../config/production/kylin-all/setenv-tool.sh     |  73 ----
 kubernetes/config/production/kylin-all/setenv.sh   |  73 ----
 .../config/production/kylin-job/kylin.properties   |  16 +-
 kubernetes/config/production/kylin-job/setenv.sh   |   2 +-
 .../production/kylin-more/applicationContext.xml   |   1 +
 .../config/production/kylin-query/kylin.properties |  11 +-
 kubernetes/config/production/kylin-query/setenv.sh |   2 +-
 .../config/production/streaming-receiver/setenv.sh |   2 +-
 kubernetes/config/production/tomcat/context.xml    |  48 +++
 kubernetes/config/production/tomcat/server.xml     | 142 +++++++
 kubernetes/config/quickstart/hadoop/core-site.xml  |  15 +
 kubernetes/config/quickstart/hadoop/hbase-site.xml |  15 +
 kubernetes/config/quickstart/hadoop/hdfs-site.xml  |  15 +
 kubernetes/config/quickstart/hadoop/hive-site.xml  |  15 +
 .../config/quickstart/hadoop/mapred-site.xml       |  15 +
 kubernetes/config/quickstart/hadoop/yarn-site.xml  |  15 +
 kubernetes/docker/README                           |   1 -
 kubernetes/docker/README.md                        |  10 +
 kubernetes/docker/hadoop-client/CDH57/Dockerfile   |  17 +
 .../docker/hadoop-client/CDH57/build-image.sh      |  16 +
 kubernetes/docker/hadoop-client/README.MD          |   9 -
 kubernetes/docker/hadoop-client/README.md          |  24 ++
 .../hadoop-client/apache-hadoop2.7/Dockerfile      |  32 +-
 .../hadoop-client/apache-hadoop2.7/build-image.sh  |  16 +
 kubernetes/docker/kylin-client/Dockerfile          |  28 +-
 kubernetes/docker/kylin-client/README              |   8 -
 kubernetes/docker/kylin-client/README.md           |   9 +
 kubernetes/docker/kylin-client/bin/bootstrap.sh    |  17 +
 .../docker/kylin-client/bin/check-liveness.sh      |  19 +-
 .../docker/kylin-client/bin/check-readiness.sh     |  17 +
 kubernetes/docker/kylin-client/bin/clean-log.sh    |  19 +-
 kubernetes/docker/kylin-client/build-image.sh      |  22 +-
 kubernetes/docker/upload.sh                        |   2 -
 kubernetes/example/README                          |  10 -
 .../deployment/kylin-job/kylin-job-service.yaml    |  17 -
 .../kylin-query/kylin-query-service.yaml           |  17 -
 .../deployment/memcached/memcached-service.yaml    |  13 -
 .../memcached/memcached-statefulset.yaml           |  42 ---
 kubernetes/template/README                         |   0
 kubernetes/template/production/check-deploy.sh     |   9 -
 kubernetes/template/production/cleanup.sh          |  25 +-
 kubernetes/template/production/deploy-kylin.sh     |  44 ++-
 .../deployment/kylin/kylin-all-statefulset.yaml    | 129 -------
 .../deployment/kylin/kylin-job-statefulset.yaml    |  75 ++--
 .../deployment/kylin/kylin-query-statefulset.yaml  |  66 ++--
 .../production/deployment/kylin/kylin-service.yaml |  18 +
 .../deployment/memcached/memcached-service.yaml    |  17 +
 .../memcached/memcached-statefulset.yaml           |  25 +-
 .../kylin-receiver-statefulset.yaml                |  40 +-
 kubernetes/template/production/example/README.md   | 173 +++++++++
 .../example/config/filebeat/filebeat.yml           |  17 +
 .../example/config/hadoop/core-site.xml            |  16 +-
 .../example/config/hadoop/hbase-site.xml           |  16 +-
 .../example/config/hadoop/hdfs-site.xml            |  16 +-
 .../example/config/hadoop/hive-site.xml            |  16 +-
 .../example/config/hadoop/mapred-site.xml          |  16 +-
 .../example/config/hadoop/yarn-site.xml            |  16 +-
 .../config/kylin-job}/kylin-kafka-consumer.xml     |   0
 .../kylin-job}/kylin-server-log4j.properties       |   0
 .../config/kylin-job}/kylin-spark-log4j.properties |   0
 .../config/kylin-job}/kylin-tools-log4j.properties |   0
 .../example/config/kylin-job/kylin.properties      |   0
 .../example/config/kylin-job}/kylin_hive_conf.xml  |   0
 .../example/config/kylin-job}/kylin_job_conf.xml   |   0
 .../kylin-job}/kylin_job_conf_cube_merge.xml       |   0
 .../config/kylin-job}/kylin_job_conf_inmem.xml     |   0
 .../example/config/kylin-job/setenv-tool.sh        |   0
 .../production/example/config/kylin-job}/setenv.sh |   0
 .../config/kylin-more/applicationContext.xml       |   0
 .../example/config/kylin-more/ehcache-test.xml     |   0
 .../example/config/kylin-more/ehcache.xml          |   0
 .../example/config/kylin-more/kylinMetrics.xml     |   0
 .../example/config/kylin-more/kylinSecurity.xml    |   0
 .../config/kylin-query}/kylin-kafka-consumer.xml   |   0
 .../kylin-query}/kylin-server-log4j.properties     |   0
 .../kylin-query}/kylin-spark-log4j.properties      |   0
 .../kylin-query}/kylin-tools-log4j.properties      |   0
 .../example/config/kylin-query/kylin.properties    |   2 +-
 .../config/kylin-query}/kylin_hive_conf.xml        |   0
 .../example/config/kylin-query}/kylin_job_conf.xml |   0
 .../kylin-query}/kylin_job_conf_cube_merge.xml     |   0
 .../config/kylin-query}/kylin_job_conf_inmem.xml   |   0
 .../example/config/kylin-query/setenv-tool.sh      |   0
 .../example/config/kylin-query}/setenv.sh          |   0
 .../production/example/config/tomcat/context.xml   |  48 +++
 .../production/example/config/tomcat/server.xml    | 142 +++++++
 .../example/deployment/deploy-sample-cluster.sh    |  81 ++--
 .../deployment/kylin-job/kylin-job-service.yaml}   |  32 +-
 .../kylin-job/kylin-job-statefulset.yaml           |  44 ++-
 .../kylin-query/kylin-query-statefulset.yaml       |  59 ++-
 .../deployment/memcached/memcached-service.yaml}   |  26 +-
 .../memcached/memcached-statefulset.yaml           |  59 +++
 kubernetes/template/quickstart/check-cluster.sh    |   7 -
 kubernetes/template/quickstart/cleanup.sh          |   2 -
 kubernetes/template/quickstart/deploy-kylin.sh     |  32 +-
 .../deployment/kylin/kylin-all-statefulset.yaml    |  57 ++-
 .../quickstart/deployment/kylin/kylin-service.yaml |  23 +-
 117 files changed, 1599 insertions(+), 1669 deletions(-)

diff --git a/.gitignore b/.gitignore
index 69d61d0..9a9ab5c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -86,7 +86,6 @@ webapp/app/components/*
 ebay*
 build/commit_SHA1
 dist/
-tomcat/
 webapp/app/kylin-servlet.xml
 webapp/app/web.xml
 dependency-reduced-pom.xml
@@ -94,5 +93,3 @@ webapp/package-lock.json
 
 # stream_index
 stream-receiver/stream_index
-# configuration files
-kubernetes/conf/*
diff --git a/kubernetes/README b/kubernetes/README
deleted file mode 100644
index c29e018..0000000
--- a/kubernetes/README
+++ /dev/null
@@ -1,6 +0,0 @@
-## Steps
-
-1. Build **hadoop-client** docker image.
-2. Build **kylin-client** docker image.
-3. Go to `config` dir, prepare configuration file for Kylin and Hadoop.
-4. Go to `template` dir,  modify system resources entry and deploy kylin service.
diff --git a/kubernetes/README.md b/kubernetes/README.md
new file mode 100644
index 0000000..964c612
--- /dev/null
+++ b/kubernetes/README.md
@@ -0,0 +1,37 @@
+## Background
+Kubernetes is a portable, extensible, open-source platform for managing containerized workloads and services, that facilitates 
+both declarative configuration and automation. It has a large, rapidly growing ecosystem. Kubernetes services, support, 
+and tools are widely available.
+
+Apache Kylin is an open source, distributed analytical data warehouse for big data. Deploying Kylin on a Kubernetes
+cluster will reduce the cost of maintenance and extension.
+
+### Directory Introduction
+- **config**
+  Please update your configuration file here. 
+- **template**
+  This directory provided two deployment templates, one for **quick-start** purpose, another for **production/distributed** deployment.
+  1. Quick-start template is for one node deployment with an **ALL** kylin instance.
+  2. Production template is for multi-node deployment with several **job**/**query** kylin instances; other services
+  like **memcached** and **filebeat** will help to satisfy log collection/query cache/session sharing demands.
+- **docker**
+  Docker image is the pre-requirement of Kylin on Kubernetes, please check this directory if you need build it yourself.
+  For CDH5.x users, you may consider using a provided image on DockerHub.
+- **template/production/example**
+  This is a complete example by applying production template in a CDH 5.7 hadoop env with step by step guide.  
+ 
+### Note 
+1. CuratorScheduler is used as default JobScheduler because it is more flexible.
+2. Spark building requires using `cluster` as the deployMode. If you forget it, your Spark application will never be submitted successfully because the Hadoop cluster cannot resolve the hostname of the Pod (Spark Driver).
+3. To modify `/etc/hosts` in Pod, please check this : https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ . 
+4. To build your own kylin-client docker image, please don't forget to download and put the following jars into KYLIN_HOME/tomcat/lib to enable tomcat session sharing.
+    - https://repo1.maven.org/maven2/de/javakaffee/msm/memcached-session-manager-tc7/2.1.1/
+    - https://repo1.maven.org/maven2/de/javakaffee/msm/memcached-session-manager/2.1.1/
+5. If you have difficulty in configuring filebeat, please check this https://www.elastic.co/guide/en/beats/filebeat/current/index.html .
+6. External query cache is enabled by default, if you are interested in detail, you may check http://kylin.apache.org/blog/2019/07/30/detailed-analysis-of-refine-query-cache/ .
+7. All configuration files are separated from the Docker image; please use configMap or secret. Compared to configMap, secrets are more recommended for security reasons.
+8. Some verified kylin-client images will be published to DockerHub; here is the link https://hub.docker.com/r/apachekylin/kylin-client . You may consider contributing your Dockerfile to kylin's repo if you are interested.
+ 
+### Reference 
+- JIRA ticket: https://issues.apache.org/jira/browse/KYLIN-4447
+- DockerHub: https://hub.docker.com/r/apachekylin/kylin-client
\ No newline at end of file
diff --git a/kubernetes/config/README b/kubernetes/config/README
deleted file mode 100644
index e08320d..0000000
--- a/kubernetes/config/README
+++ /dev/null
@@ -1 +0,0 @@
-Please replace with your own configuration here.
\ No newline at end of file
diff --git a/kubernetes/config/README.md b/kubernetes/config/README.md
new file mode 100644
index 0000000..d401ce7
--- /dev/null
+++ b/kubernetes/config/README.md
@@ -0,0 +1,3 @@
+## Step
+1. Please replace with your own configuration files here.
+2. Follow the manual to create config map for each directory.
\ No newline at end of file
diff --git a/kubernetes/config/production/filebeat/filebeat.yml b/kubernetes/config/production/filebeat/filebeat.yml
index 6916da1..8fcbba0 100644
--- a/kubernetes/config/production/filebeat/filebeat.yml
+++ b/kubernetes/config/production/filebeat/filebeat.yml
@@ -1,3 +1,20 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
 ###################### Filebeat Configuration Example #########################
 
 # This file is an example configuration file highlighting only the most common
@@ -103,6 +120,6 @@ setup.kibana:
 #-------------------------- Elasticsearch output ------------------------------
 output.elasticsearch:
   # Array of hosts to connect to.
-  hosts: ["cdh-master:9200"]
+  hosts: ["{elasticsearch-server}:9200"]
   protocol: "http"
   index: "kylin-%{[fields.name]}-%{+yyyy-MM-dd}"
\ No newline at end of file
diff --git a/kubernetes/config/production/hadoop/core-site.xml b/kubernetes/config/production/hadoop/core-site.xml
index 9108ad8..6b981ee 100644
--- a/kubernetes/config/production/hadoop/core-site.xml
+++ b/kubernetes/config/production/hadoop/core-site.xml
@@ -1,4 +1,19 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
 
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 <configuration>
 </configuration>
diff --git a/kubernetes/config/production/hadoop/hbase-site.xml b/kubernetes/config/production/hadoop/hbase-site.xml
index 9108ad8..6b981ee 100644
--- a/kubernetes/config/production/hadoop/hbase-site.xml
+++ b/kubernetes/config/production/hadoop/hbase-site.xml
@@ -1,4 +1,19 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
 
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 <configuration>
 </configuration>
diff --git a/kubernetes/config/production/hadoop/hdfs-site.xml b/kubernetes/config/production/hadoop/hdfs-site.xml
index 9108ad8..6b981ee 100644
--- a/kubernetes/config/production/hadoop/hdfs-site.xml
+++ b/kubernetes/config/production/hadoop/hdfs-site.xml
@@ -1,4 +1,19 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
 
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 <configuration>
 </configuration>
diff --git a/kubernetes/config/production/hadoop/hive-site.xml b/kubernetes/config/production/hadoop/hive-site.xml
index 9108ad8..6b981ee 100644
--- a/kubernetes/config/production/hadoop/hive-site.xml
+++ b/kubernetes/config/production/hadoop/hive-site.xml
@@ -1,4 +1,19 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
 
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 <configuration>
 </configuration>
diff --git a/kubernetes/config/production/hadoop/mapred-site.xml b/kubernetes/config/production/hadoop/mapred-site.xml
index 9108ad8..6b981ee 100644
--- a/kubernetes/config/production/hadoop/mapred-site.xml
+++ b/kubernetes/config/production/hadoop/mapred-site.xml
@@ -1,4 +1,19 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
 
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 <configuration>
 </configuration>
diff --git a/kubernetes/config/production/hadoop/yarn-site.xml b/kubernetes/config/production/hadoop/yarn-site.xml
index 9108ad8..6b981ee 100644
--- a/kubernetes/config/production/hadoop/yarn-site.xml
+++ b/kubernetes/config/production/hadoop/yarn-site.xml
@@ -1,4 +1,19 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
 
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 <configuration>
 </configuration>
diff --git a/kubernetes/config/production/kylin-all/kylin-kafka-consumer.xml b/kubernetes/config/production/kylin-all/kylin-kafka-consumer.xml
deleted file mode 100644
index 8529a41..0000000
--- a/kubernetes/config/production/kylin-all/kylin-kafka-consumer.xml
+++ /dev/null
@@ -1,31 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<!--
- for more kafka consumer configs, please refer to http://kafka.apache.org/documentation#consumerconfigs
--->
-<configuration>
-    <property>
-        <name>session.timeout.ms</name>
-        <value>10000</value>
-    </property>
-    <property>
-        <name>request.timeout.ms</name>
-        <value>20000</value>
-    </property>
-</configuration>
\ No newline at end of file
diff --git a/kubernetes/config/production/kylin-all/kylin-spark-log4j.properties b/kubernetes/config/production/kylin-all/kylin-spark-log4j.properties
deleted file mode 100644
index 948fb32..0000000
--- a/kubernetes/config/production/kylin-all/kylin-spark-log4j.properties
+++ /dev/null
@@ -1,43 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-log4j.rootCategory=WARN,stderr,stdout
-log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-log4j.appender.stdout.target=System.out
-log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}:%L : %m%n
-
-log4j.appender.stderr=org.apache.log4j.ConsoleAppender
-log4j.appender.stderr.Target=System.err
-log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
-log4j.appender.stderr.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}:%L : %m%n
-
-
-# Settings to quiet third party logs that are too verbose
-log4j.logger.org.spark-project.jetty=WARN
-log4j.logger.org.spark-project.jetty.util.component.AbstractLifeCycle=ERROR
-log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO
-log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO
-log4j.logger.org.apache.parquet=ERROR
-log4j.logger.parquet=ERROR
-
-# SPARK-9183: Settings to avoid annoying messages when looking up nonexistent UDFs in SparkSQL with Hive support
-log4j.logger.org.apache.hadoop.hive.metastore.RetryingHMSHandler=FATAL
-log4j.logger.org.apache.hadoop.hive.ql.exec.FunctionRegistry=ERROR
-log4j.logger.org.apache.spark.sql=WARN
-
-log4j.logger.org.apache.kylin=DEBUG
\ No newline at end of file
diff --git a/kubernetes/config/production/kylin-all/kylin-tools-log4j.properties b/kubernetes/config/production/kylin-all/kylin-tools-log4j.properties
deleted file mode 100644
index 54d18c2..0000000
--- a/kubernetes/config/production/kylin-all/kylin-tools-log4j.properties
+++ /dev/null
@@ -1,38 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-
-# the kylin-tools-log4j.properties is mainly for configuring log properties on kylin tools, including:
-#   1. tools launched by kylin.sh script, e.g. DeployCoprocessorCLI
-#   2. DebugTomcat
-#   3. others
-#
-# It's called kylin-tools-log4j.properties so that it won't distract users from the other more important log4j config file: kylin-server-log4j.properties
-# enable this by -Dlog4j.configuration=kylin-tools-log4j.properties
-
-log4j.rootLogger=INFO,stderr
-
-log4j.appender.stderr=org.apache.log4j.ConsoleAppender
-log4j.appender.stderr.Target=System.err
-log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
-log4j.appender.stderr.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}:%L : %m%n
-
-#log4j.logger.org.apache.hadoop=ERROR
-log4j.logger.org.apache.kylin=DEBUG
-log4j.logger.org.springframework=WARN
-log4j.logger.org.apache.kylin.tool.shaded=INFO
\ No newline at end of file
diff --git a/kubernetes/config/production/kylin-all/kylin.properties b/kubernetes/config/production/kylin-all/kylin.properties
deleted file mode 100644
index c4d3cc1..0000000
--- a/kubernetes/config/production/kylin-all/kylin.properties
+++ /dev/null
@@ -1,419 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-kylin.cache.memcached.hosts=10.1.2.42:11211
-kylin.query.cache-signature-enabled=true
-kylin.query.lazy-query-enabled=true
-kylin.metrics.memcached.enabled=true
-kylin.query.segment-cache-enabled=true
-
-kylin.metrics.monitor-enabled=true
-kylin.metrics.reporter-query-enabled=true
-kylin.metrics.reporter-job-enabled=true
-# The below commented values will effect as default settings
-# Uncomment and override them if necessary
-
-
-
-#
-#### METADATA | ENV ###
-#
-## The metadata store in hbase
-kylin.metadata.url=kylin_metadata_k8s_prod@hbase
-#
-## metadata cache sync retry times
-#kylin.metadata.sync-retries=3
-#
-## Working folder in HDFS, better be qualified absolute path, make sure user has the right permission to this directory
-kylin.env.hdfs-working-dir=/kylin/apache_kylin
-#
-## DEV|QA|PROD. DEV will turn on some dev features, QA and PROD has no difference in terms of functions.
-#kylin.env=QA
-#
-## kylin zk base path
-#kylin.env.zookeeper-base-path=/kylin
-#
-#### SERVER | WEB | RESTCLIENT ###
-#
-## Kylin server mode, valid value [all, query, job]
-kylin.server.mode=all
-#
-## List of web servers in use, this enables one web server instance to sync up with other servers.
-#kylin.server.cluster-servers=localhost:7070
-#
-## Display timezone on UI,format like[GMT+N or GMT-N]
-#kylin.web.timezone=
-#
-## Timeout value for the queries submitted through the Web UI, in milliseconds
-#kylin.web.query-timeout=300000
-#
-#kylin.web.cross-domain-enabled=true
-#
-##allow user to export query result
-#kylin.web.export-allow-admin=true
-#kylin.web.export-allow-other=true
-#
-## Hide measures in measure list of cube designer, separate by comma
-#kylin.web.hide-measures=RAW
-#
-##max connections of one route
-#kylin.restclient.connection.default-max-per-route=20
-#
-##max connections of one rest-client
-#kylin.restclient.connection.max-total=200
-#
-#### PUBLIC CONFIG ###
-#kylin.engine.default=2
-#kylin.storage.default=2
-#kylin.web.hive-limit=20
-#kylin.web.help.length=4
-#kylin.web.help.0=start|Getting Started|http://kylin.apache.org/docs/tutorial/kylin_sample.html
-#kylin.web.help.1=odbc|ODBC Driver|http://kylin.apache.org/docs/tutorial/odbc.html
-#kylin.web.help.2=tableau|Tableau Guide|http://kylin.apache.org/docs/tutorial/tableau_91.html
-#kylin.web.help.3=onboard|Cube Design Tutorial|http://kylin.apache.org/docs/howto/howto_optimize_cubes.html
-#kylin.web.link-streaming-guide=http://kylin.apache.org/
-#kylin.htrace.show-gui-trace-toggle=false
-#kylin.web.link-hadoop=
-#kylin.web.link-diagnostic=
-#kylin.web.contact-mail=
-#kylin.server.external-acl-provider=
-#
-## Default time filter for job list, 0->current day, 1->last one day, 2->last one week, 3->last one year, 4->all
-#kylin.web.default-time-filter=1
-#
-#### SOURCE ###
-#
-## Hive client, valid value [cli, beeline]
-#kylin.source.hive.client=cli
-#
-## Absolute path to beeline shell, can be set to spark beeline instead of the default hive beeline on PATH
-#kylin.source.hive.beeline-shell=beeline
-#
-## Parameters for beeline client, only necessary if hive client is beeline
-##kylin.source.hive.beeline-params=-n root --hiveconf hive.security.authorization.sqlstd.confwhitelist.append='mapreduce.job.*|dfs.*' -u jdbc:hive2://localhost:10000
-#
-## While hive client uses above settings to read hive table metadata,
-## table operations can go through a separate SparkSQL command line, given SparkSQL connects to the same Hive metastore.
-#kylin.source.hive.enable-sparksql-for-table-ops=false
-##kylin.source.hive.sparksql-beeline-shell=/path/to/spark-client/bin/beeline
-##kylin.source.hive.sparksql-beeline-params=-n root --hiveconf hive.security.authorization.sqlstd.confwhitelist.append='mapreduce.job.*|dfs.*' -u jdbc:hive2://localhost:10000
-#
-#kylin.source.hive.keep-flat-table=false
-#
-## Hive database name for putting the intermediate flat tables
-#kylin.source.hive.database-for-flat-table=default
-#
-## Whether redistribute the intermediate flat table before building
-#kylin.source.hive.redistribute-flat-table=true
-#
-#
-#### STORAGE ###
-#
-## The storage for final cube file in hbase
-#kylin.storage.url=hbase
-#
-## The prefix of hbase table
-kylin.storage.hbase.table-name-prefix=K8S_
-#
-## The namespace for hbase storage
-kylin.storage.hbase.namespace=lacus
-#
-## Compression codec for htable, valid value [none, snappy, lzo, gzip, lz4]
-#kylin.storage.hbase.compression-codec=none
-#
-## HBase Cluster FileSystem, which serving hbase, format as hdfs://hbase-cluster:8020
-## Leave empty if hbase running on same cluster with hive and mapreduce
-##kylin.storage.hbase.cluster-fs=
-#
-## The cut size for hbase region, in GB.
-#kylin.storage.hbase.region-cut-gb=5
-#
-## The hfile size of GB, smaller hfile leading to the converting hfile MR has more reducers and be faster.
-## Set 0 to disable this optimization.
-#kylin.storage.hbase.hfile-size-gb=2
-#
-#kylin.storage.hbase.min-region-count=1
-#kylin.storage.hbase.max-region-count=500
-#
-## Optional information for the owner of kylin platform, it can be your team's email
-## Currently it will be attached to each kylin's htable attribute
-#kylin.storage.hbase.owner-tag=whoami@kylin.apache.org
-#
-#kylin.storage.hbase.coprocessor-mem-gb=3
-#
-## By default kylin can spill query's intermediate results to disks when it's consuming too much memory.
-## Set it to false if you want query to abort immediately in such condition.
-#kylin.storage.partition.aggr-spill-enabled=true
-#
-## The maximum number of bytes each coprocessor is allowed to scan.
-## To allow arbitrary large scan, you can set it to 0.
-#kylin.storage.partition.max-scan-bytes=3221225472
-#
-## The default coprocessor timeout is (hbase.rpc.timeout * 0.9) / 1000 seconds,
-## You can set it to a smaller value. 0 means use default.
-## kylin.storage.hbase.coprocessor-timeout-seconds=0
-#
-## clean real storage after delete operation
-## if you want to delete the real storage like htable of deleting segment, you can set it to true
-#kylin.storage.clean-after-delete-operation=false
-#
-#### JOB ###
-#
-## Max job retry on error, default 0: no retry
-#kylin.job.retry=0
-#
-## Max count of concurrent jobs running
-#kylin.job.max-concurrent-jobs=10
-#
-## The percentage of the sampling, default 100%
-#kylin.job.sampling-percentage=100
-#
-## If true, will send email notification on job complete
-##kylin.job.notification-enabled=true
-##kylin.job.notification-mail-enable-starttls=true
-##kylin.job.notification-mail-host=smtp.office365.com
-##kylin.job.notification-mail-port=587
-##kylin.job.notification-mail-username=kylin@example.com
-##kylin.job.notification-mail-password=mypassword
-##kylin.job.notification-mail-sender=kylin@example.com
-kylin.job.scheduler.provider.100=org.apache.kylin.job.impl.curator.CuratorScheduler
-kylin.job.scheduler.default=100
-#
-#### ENGINE ###
-#
-## Time interval to check hadoop job status
-#kylin.engine.mr.yarn-check-interval-seconds=10
-#
-#kylin.engine.mr.reduce-input-mb=500
-#
-#kylin.engine.mr.max-reducer-number=500
-#
-#kylin.engine.mr.mapper-input-rows=1000000
-#
-## Enable dictionary building in MR reducer
-#kylin.engine.mr.build-dict-in-reducer=true
-#
-## Number of reducers for fetching UHC column distinct values
-#kylin.engine.mr.uhc-reducer-count=3
-#
-## Whether using an additional step to build UHC dictionary
-#kylin.engine.mr.build-uhc-dict-in-additional-step=false
-#
-#
-#### CUBE | DICTIONARY ###
-#
-#kylin.cube.cuboid-scheduler=org.apache.kylin.cube.cuboid.DefaultCuboidScheduler
-#kylin.cube.segment-advisor=org.apache.kylin.cube.CubeSegmentAdvisor
-#
-## 'auto', 'inmem', 'layer' or 'random' for testing 
-#kylin.cube.algorithm=layer
-#
-## A smaller threshold prefers layer, a larger threshold prefers in-mem
-#kylin.cube.algorithm.layer-or-inmem-threshold=7
-#
-## auto use inmem algorithm:
-## 1, cube planner optimize job
-## 2, no source record
-#kylin.cube.algorithm.inmem-auto-optimize=true
-#
-#kylin.cube.aggrgroup.max-combination=32768
-#
-#kylin.snapshot.max-mb=300
-#
-#kylin.cube.cubeplanner.enabled=true
-#kylin.cube.cubeplanner.enabled-for-existing-cube=true
-#kylin.cube.cubeplanner.expansion-threshold=15.0
-#kylin.cube.cubeplanner.recommend-cache-max-size=200
-#kylin.cube.cubeplanner.mandatory-rollup-threshold=1000
-#kylin.cube.cubeplanner.algorithm-threshold-greedy=8
-#kylin.cube.cubeplanner.algorithm-threshold-genetic=23
-#
-#
-#### QUERY ###
-#
-## Controls the maximum number of bytes a query is allowed to scan storage.
-## The default value 0 means no limit.
-## The counterpart kylin.storage.partition.max-scan-bytes sets the maximum per coprocessor.
-#kylin.query.max-scan-bytes=0
-#
-kylin.query.cache-enabled=true
-#
-## Controls extras properties for Calcite jdbc driver
-## all extras properties should be under the prefix "kylin.query.calcite.extras-props."
-## case sensitive, default: true, to enable case insensitive set it to false
-## @see org.apache.calcite.config.CalciteConnectionProperty.CASE_SENSITIVE
-#kylin.query.calcite.extras-props.caseSensitive=true
-## how to handle unquoted identifiers, default: TO_UPPER, available options: UNCHANGED, TO_UPPER, TO_LOWER
-## @see org.apache.calcite.config.CalciteConnectionProperty.UNQUOTED_CASING
-#kylin.query.calcite.extras-props.unquotedCasing=TO_UPPER
-## quoting method, default: DOUBLE_QUOTE, available options: DOUBLE_QUOTE, BACK_TICK, BRACKET
-## @see org.apache.calcite.config.CalciteConnectionProperty.QUOTING
-#kylin.query.calcite.extras-props.quoting=DOUBLE_QUOTE
-## change SqlConformance from DEFAULT to LENIENT to enable group by ordinal
-## @see org.apache.calcite.sql.validate.SqlConformance.SqlConformanceEnum
-#kylin.query.calcite.extras-props.conformance=LENIENT
-#
-## TABLE ACL
-#kylin.query.security.table-acl-enabled=true
-#
-## Usually should not modify this
-#kylin.query.interceptors=org.apache.kylin.rest.security.TableInterceptor
-#
-#kylin.query.escape-default-keyword=false
-#
-## Usually should not modify this
-#kylin.query.transformers=org.apache.kylin.query.util.DefaultQueryTransformer,org.apache.kylin.query.util.KeywordDefaultDirtyHack
-#
-#### SECURITY ###
-#
-## Spring security profile, options: testing, ldap, saml
-## with "testing" profile, user can use pre-defined name/pwd like KYLIN/ADMIN to login
-#kylin.security.profile=testing
-#
-## Admin roles in LDAP, for ldap and saml
-#kylin.security.acl.admin-role=admin
-#
-## LDAP authentication configuration
-#kylin.security.ldap.connection-server=ldap://ldap_server:389
-#kylin.security.ldap.connection-username=
-#kylin.security.ldap.connection-password=
-#
-## LDAP user account directory;
-#kylin.security.ldap.user-search-base=
-#kylin.security.ldap.user-search-pattern=
-#kylin.security.ldap.user-group-search-base=
-#kylin.security.ldap.user-group-search-filter=(|(member={0})(memberUid={1}))
-#
-## LDAP service account directory
-#kylin.security.ldap.service-search-base=
-#kylin.security.ldap.service-search-pattern=
-#kylin.security.ldap.service-group-search-base=
-#
-### SAML configurations for SSO
-## SAML IDP metadata file location
-#kylin.security.saml.metadata-file=classpath:sso_metadata.xml
-#kylin.security.saml.metadata-entity-base-url=https://hostname/kylin
-#kylin.security.saml.keystore-file=classpath:samlKeystore.jks
-#kylin.security.saml.context-scheme=https
-#kylin.security.saml.context-server-name=hostname
-#kylin.security.saml.context-server-port=443
-#kylin.security.saml.context-path=/kylin
-#
-#### SPARK ENGINE CONFIGS ###
-#
-## Hadoop conf folder, will export this as "HADOOP_CONF_DIR" to run spark-submit
-## This must contain site xmls of core, yarn, hive, and hbase in one folder
-##kylin.env.hadoop-conf-dir=/etc/hadoop/conf
-#
-## Estimate the RDD partition numbers
-#kylin.engine.spark.rdd-partition-cut-mb=10
-#
-## Minimal partition numbers of rdd
-#kylin.engine.spark.min-partition=1
-#
-## Max partition numbers of rdd
-#kylin.engine.spark.max-partition=5000
-#
-## Spark conf (default is in spark/conf/spark-defaults.conf)
-#kylin.engine.spark-conf.spark.master=yarn
-##kylin.engine.spark-conf.spark.submit.deployMode=cluster
-#kylin.engine.spark-conf.spark.yarn.queue=default
-#kylin.engine.spark-conf.spark.driver.memory=2G
-#kylin.engine.spark-conf.spark.executor.memory=4G
-#kylin.engine.spark-conf.spark.executor.instances=40
-#kylin.engine.spark-conf.spark.yarn.executor.memoryOverhead=1024
-#kylin.engine.spark-conf.spark.shuffle.service.enabled=true
-#kylin.engine.spark-conf.spark.eventLog.enabled=true
-#kylin.engine.spark-conf.spark.eventLog.dir=hdfs\:///kylin/spark-history
-#kylin.engine.spark-conf.spark.history.fs.logDirectory=hdfs\:///kylin/spark-history
-#kylin.engine.spark-conf.spark.hadoop.yarn.timeline-service.enabled=false
-#
-#### Spark conf for specific job
-#kylin.engine.spark-conf-mergedict.spark.executor.memory=6G
-#kylin.engine.spark-conf-mergedict.spark.memory.fraction=0.2
-#
-## manually upload spark-assembly jar to HDFS and then set this property will avoid repeatedly uploading jar at runtime
-##kylin.engine.spark-conf.spark.yarn.archive=hdfs://namenode:8020/kylin/spark/spark-libs.jar
-##kylin.engine.spark-conf.spark.io.compression.codec=org.apache.spark.io.SnappyCompressionCodec
-#
-## uncomment for HDP
-##kylin.engine.spark-conf.spark.driver.extraJavaOptions=-Dhdp.version=current
-##kylin.engine.spark-conf.spark.yarn.am.extraJavaOptions=-Dhdp.version=current
-##kylin.engine.spark-conf.spark.executor.extraJavaOptions=-Dhdp.version=current
-#
-#
-#### QUERY PUSH DOWN ###
-#
-##kylin.query.pushdown.runner-class-name=org.apache.kylin.query.adhoc.PushDownRunnerJdbcImpl
-#
-##kylin.query.pushdown.update-enabled=false
-##kylin.query.pushdown.jdbc.url=jdbc:hive2://sandbox:10000/default
-##kylin.query.pushdown.jdbc.driver=org.apache.hive.jdbc.HiveDriver
-##kylin.query.pushdown.jdbc.username=hive
-##kylin.query.pushdown.jdbc.password=
-#
-##kylin.query.pushdown.jdbc.pool-max-total=8
-##kylin.query.pushdown.jdbc.pool-max-idle=8
-##kylin.query.pushdown.jdbc.pool-min-idle=0
-#
-#### JDBC Data Source
-##kylin.source.jdbc.connection-url=
-##kylin.source.jdbc.driver=
-##kylin.source.jdbc.dialect=
-##kylin.source.jdbc.user=
-##kylin.source.jdbc.pass=
-##kylin.source.jdbc.sqoop-home=
-##kylin.source.jdbc.filed-delimiter=|
-#
-#### Livy with Kylin
-##kylin.engine.livy-conf.livy-enabled=false
-##kylin.engine.livy-conf.livy-url=http://LivyHost:8998
-##kylin.engine.livy-conf.livy-key.file=hdfs:///path-to-kylin-job-jar
-##kylin.engine.livy-conf.livy-arr.jars=hdfs:///path-to-hadoop-dependency-jar
-#
-#
-#### Realtime OLAP ###
-#
-## Where should local segment cache located, for absolute path, the real path will be ${KYLIN_HOME}/${kylin.stream.index.path}
-#kylin.stream.index.path=stream_index
-#
-## The timezone for Derived Time Column like hour_start, try set to GMT+N, please check detail at KYLIN-4010
-#kylin.stream.event.timezone=
-#
-## Debug switch for print realtime global dict encode information, please check detail at KYLIN-4141
-#kylin.stream.print-realtime-dict-enabled=false
-#
-## Should enable latest coordinator, please check detail at KYLIN-4167
-#kylin.stream.new.coordinator-enabled=true
-#
-## In which way should we collect receiver's metrics info
-##kylin.stream.metrics.option=console/csv/jmx
-#
-## When enabling a streaming cube, should consume from the earliest offset or the latest offset
-#kylin.stream.consume.offsets.latest=true
-#
-## The parallelism of scan in receiver side
-#kylin.stream.receiver.use-threads-per-query=8
-#
-## How coordinator/receiver register itself into StreamMetadata, there are three option:
-## 1. hostname:port, then kylin will set the config ip and port as the currentNode;
-## 2. port, then kylin will get the node's hostname and append port as the currentNode;
-## 3. not set, then kylin will get the node hostname address and set the hostname and defaultPort(7070 for coordinator or 9090 for receiver) as the currentNode.
-##kylin.stream.node=
-#
-## Auto resubmit after job be discarded
-#kylin.stream.auto-resubmit-after-discard-enabled=true
diff --git a/kubernetes/config/production/kylin-all/kylin_hive_conf.xml b/kubernetes/config/production/kylin-all/kylin_hive_conf.xml
deleted file mode 100644
index f01d08e..0000000
--- a/kubernetes/config/production/kylin-all/kylin_hive_conf.xml
+++ /dev/null
@@ -1,102 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<configuration>
-    <property>
-        <name>dfs.replication</name>
-        <value>2</value>
-        <description>Block replication</description>
-    </property>
-
-    <property>
-        <name>hive.exec.compress.output</name>
-        <value>true</value>
-        <description>Enable compress</description>
-    </property>
-
-    <property>
-        <name>hive.auto.convert.join</name>
-        <value>true</value>
-        <description>Enables the optimization about converting common join into mapjoin</description>
-    </property>
-
-    <property>
-        <name>hive.auto.convert.join.noconditionaltask</name>
-        <value>true</value>
-        <description>enable map-side join</description>
-    </property>
-
-    <property>
-        <name>hive.auto.convert.join.noconditionaltask.size</name>
-        <value>100000000</value>
-        <description>enable map-side join</description>
-    </property>
-
-    <!--
-    The default map outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
-    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
-    -->
-    <!--
-    <property>
-        <name>mapreduce.map.output.compress.codec</name>
-        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
-        <description></description>
-    </property>
-    -->
-    <!--
-    The default job outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
-    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
-    -->
-    <!--
-    <property>
-        <name>mapreduce.output.fileoutputformat.compress.codec</name>
-        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
-        <description></description>
-    </property>
-    <property>
-        <name>mapreduce.output.fileoutputformat.compress.type</name>
-        <value>BLOCK</value>
-        <description>The compression type to use for job outputs</description>
-    </property>
-
-    -->
-    <property>
-        <name>mapreduce.job.split.metainfo.maxsize</name>
-        <value>-1</value>
-        <description>The maximum permissible size of the split metainfo file.
-            The JobTracker won't attempt to read split metainfo files bigger than
-            the configured value. No limits if set to -1.
-        </description>
-    </property>
-
-    <property>
-        <name>hive.stats.autogather</name>
-        <value>true</value>
-        <description>Collect statistics for newly created intermediate table</description>
-    </property>
-
-    <property>
-        <name>hive.merge.mapfiles</name>
-        <value>false</value>
-        <description>Disable Hive's auto merge</description>
-    </property>
-
-    <property>
-        <name>hive.merge.mapredfiles</name>
-        <value>false</value>
-        <description>Disable Hive's auto merge</description>
-    </property>
-</configuration>
diff --git a/kubernetes/config/production/kylin-all/kylin_job_conf.xml b/kubernetes/config/production/kylin-all/kylin_job_conf.xml
deleted file mode 100644
index 17a9145..0000000
--- a/kubernetes/config/production/kylin-all/kylin_job_conf.xml
+++ /dev/null
@@ -1,88 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<configuration>
-
-    <property>
-        <name>mapreduce.job.split.metainfo.maxsize</name>
-        <value>-1</value>
-        <description>The maximum permissible size of the split metainfo file.
-            The JobTracker won't attempt to read split metainfo files bigger than
-            the configured value. No limits if set to -1.
-        </description>
-    </property>
-
-    <property>
-        <name>mapreduce.map.output.compress</name>
-        <value>true</value>
-        <description>Compress map outputs</description>
-    </property>
-
-    <!--
-    The default map outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
-    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
-    -->
-    <!--
-    <property>
-        <name>mapreduce.map.output.compress.codec</name>
-        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
-        <description>The compression codec to use for map outputs
-        </description>
-    </property>
-    -->
-    <property>
-        <name>mapreduce.output.fileoutputformat.compress</name>
-        <value>true</value>
-        <description>Compress the output of a MapReduce job</description>
-    </property>
-    <!--
-    The default job outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
-    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
-    -->
-    <!--
-    <property>
-        <name>mapreduce.output.fileoutputformat.compress.codec</name>
-        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
-        <description>The compression codec to use for job outputs
-        </description>
-    </property>
-    -->
-    <property>
-        <name>mapreduce.output.fileoutputformat.compress.type</name>
-        <value>BLOCK</value>
-        <description>The compression type to use for job outputs</description>
-    </property>
-
-
-    <property>
-        <name>mapreduce.job.max.split.locations</name>
-        <value>2000</value>
-        <description>No description</description>
-    </property>
-
-    <property>
-        <name>dfs.replication</name>
-        <value>2</value>
-        <description>Block replication</description>
-    </property>
-
-    <property>
-        <name>mapreduce.task.timeout</name>
-        <value>3600000</value>
-        <description>Set task timeout to 1 hour</description>
-    </property>
-
-</configuration>
diff --git a/kubernetes/config/production/kylin-all/kylin_job_conf_cube_merge.xml b/kubernetes/config/production/kylin-all/kylin_job_conf_cube_merge.xml
deleted file mode 100644
index 79365ad..0000000
--- a/kubernetes/config/production/kylin-all/kylin_job_conf_cube_merge.xml
+++ /dev/null
@@ -1,104 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<configuration>
-    <property>
-        <name>mapreduce.job.split.metainfo.maxsize</name>
-        <value>-1</value>
-        <description>The maximum permissible size of the split metainfo file.
-            The JobTracker won't attempt to read split metainfo files bigger than
-            the configured value. No limits if set to -1.
-        </description>
-    </property>
-
-    <property>
-        <name>mapreduce.map.output.compress</name>
-        <value>true</value>
-        <description>Compress map outputs</description>
-    </property>
-
-    <!--
-    The default map outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
-    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
-    -->
-    <!--
-    <property>
-        <name>mapreduce.map.output.compress.codec</name>
-        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
-        <description>The compression codec to use for map outputs
-        </description>
-    </property>
-    -->
-    <property>
-        <name>mapreduce.output.fileoutputformat.compress</name>
-        <value>true</value>
-        <description>Compress the output of a MapReduce job</description>
-    </property>
-    <!--
-    The default job outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
-    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
-    -->
-    <!--
-    <property>
-        <name>mapreduce.output.fileoutputformat.compress.codec</name>
-        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
-        <description>The compression codec to use for job outputs
-        </description>
-    </property>
-    -->
-    <property>
-        <name>mapreduce.output.fileoutputformat.compress.type</name>
-        <value>BLOCK</value>
-        <description>The compression type to use for job outputs</description>
-    </property>
-
-    <property>
-        <name>mapreduce.job.max.split.locations</name>
-        <value>2000</value>
-        <description>No description</description>
-    </property>
-
-    <property>
-        <name>dfs.replication</name>
-        <value>2</value>
-        <description>Block replication</description>
-    </property>
-
-    <property>
-        <name>mapreduce.task.timeout</name>
-        <value>7200000</value>
-        <description>Set task timeout to 1 hour</description>
-    </property>
-
-    <!--Additional config for cube merge job, giving more memory -->
-    <property>
-        <name>mapreduce.map.memory.mb</name>
-        <value>3072</value>
-        <description></description>
-    </property>
-
-    <property>
-        <name>mapreduce.map.java.opts</name>
-        <value>-Xmx2700m -XX:OnOutOfMemoryError='kill -9 %p'</value>
-        <description></description>
-    </property>
-
-    <property>
-        <name>mapreduce.task.io.sort.mb</name>
-        <value>200</value>
-        <description></description>
-    </property>
-</configuration>
diff --git a/kubernetes/config/production/kylin-all/kylin_job_conf_inmem.xml b/kubernetes/config/production/kylin-all/kylin_job_conf_inmem.xml
deleted file mode 100644
index ddda4dd..0000000
--- a/kubernetes/config/production/kylin-all/kylin_job_conf_inmem.xml
+++ /dev/null
@@ -1,111 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<configuration>
-    <property>
-        <name>mapreduce.job.is-mem-hungry</name>
-        <value>true</value>
-    </property>
-
-    <property>
-        <name>mapreduce.job.split.metainfo.maxsize</name>
-        <value>-1</value>
-        <description>The maximum permissible size of the split metainfo file.
-            The JobTracker won't attempt to read split metainfo files bigger than
-            the configured value. No limits if set to -1.
-        </description>
-    </property>
-
-    <property>
-        <name>mapreduce.map.output.compress</name>
-        <value>true</value>
-        <description>Compress map outputs</description>
-    </property>
-
-    <!--
-    The default map outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
-    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
-    -->
-    <!--
-    <property>
-        <name>mapreduce.map.output.compress.codec</name>
-        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
-        <description>The compression codec to use for map outputs
-        </description>
-    </property>
-    -->
-    <property>
-        <name>mapreduce.output.fileoutputformat.compress</name>
-        <value>true</value>
-        <description>Compress the output of a MapReduce job</description>
-    </property>
-    <!--
-    The default job outputs compress codec is org.apache.hadoop.io.compress.DefaultCodec,
-    if SnappyCodec is supported, org.apache.hadoop.io.compress.SnappyCodec could be used.
-    -->
-    <!--
-    <property>
-        <name>mapreduce.output.fileoutputformat.compress.codec</name>
-        <value>org.apache.hadoop.io.compress.SnappyCodec</value>
-        <description>The compression codec to use for job outputs
-        </description>
-    </property>
-    -->
-    <property>
-        <name>mapreduce.output.fileoutputformat.compress.type</name>
-        <value>BLOCK</value>
-        <description>The compression type to use for job outputs</description>
-    </property>
-
-
-    <property>
-        <name>mapreduce.job.max.split.locations</name>
-        <value>2000</value>
-        <description>No description</description>
-    </property>
-
-    <property>
-        <name>dfs.replication</name>
-        <value>2</value>
-        <description>Block replication</description>
-    </property>
-
-    <property>
-        <name>mapreduce.task.timeout</name>
-        <value>7200000</value>
-        <description>Set task timeout to 1 hour</description>
-    </property>
-
-    <!--Additional config for in-mem cubing, giving mapper more memory -->
-    <property>
-        <name>mapreduce.map.memory.mb</name>
-        <value>3072</value>
-        <description></description>
-    </property>
-
-    <property>
-        <name>mapreduce.map.java.opts</name>
-        <value>-Xmx2700m -XX:OnOutOfMemoryError='kill -9 %p'</value>
-        <description></description>
-    </property>
-
-    <property>
-        <name>mapreduce.task.io.sort.mb</name>
-        <value>200</value>
-        <description></description>
-    </property>
-
-</configuration>
diff --git a/kubernetes/config/production/kylin-all/setenv-tool.sh b/kubernetes/config/production/kylin-all/setenv-tool.sh
deleted file mode 100644
index 487b5ef..0000000
--- a/kubernetes/config/production/kylin-all/setenv-tool.sh
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# source me
-
-# (if your're deploying KYLIN on a powerful server and want to replace the default conservative settings)
-# uncomment following to for it to take effect
-export KYLIN_JVM_SETTINGS="-Xms1024M -Xmx4096M -Xss1024K -XX:MaxPermSize=512M -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.$$ -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=64M"
-
-# Newer versions of glibc use an arena memory allocator that causes virtual
-# memory usage to explode. Tune the variable down to prevent vmem explosion.
-# See HADOOP-7154.
-export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
-
-# export KYLIN_JVM_SETTINGS="-Xms16g -Xmx16g -XX:MaxPermSize=512m -XX:NewSize=3g -XX:MaxNewSize=3g -XX:SurvivorRatio=4 -XX:+CMSClassUnloadingEnabled -XX:+CMSParallelRemarkEnabled -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:+DisableExplicitGC -XX:+HeapDumpOnOutOfMemoryError -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.$$ -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFi [...]
-
-# uncomment following to for it to take effect(the values need adjusting to fit your env)
-# export KYLIN_DEBUG_SETTINGS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
-
-# when running on HDP, try to determine the software stack version adn set hdp.version JVM property 
-if [[ -d "/usr/hdp/current/hadoop-client" ]]
-then
-   export KYLIN_EXTRA_START_OPTS="-Dhdp.version=`ls -l /usr/hdp/current/hadoop-client | awk -F'/' '{print $8}'`"
-   # attempt to locate JVM native libraries and set corresponding property
-   if [[ -d "/usr/hdp/current/hadoop-client/lib/native" ]]
-   then
-      export KYLIN_LD_LIBRARY_SETTINGS="-Djava.library.path=/usr/hdp/current/hadoop-client/lib/native"
-   fi
-else
-   export KYLIN_EXTRA_START_OPTS=""
-   # uncomment the following line to set JVM native library path, the values need to reflect your environment and hardware architecture
-   # export KYLIN_LD_LIBRARY_SETTINGS="-Djava.library.path=/apache/hadoop/lib/native/Linux-amd64-64"
-fi
-
-if [ ! -z "${KYLIN_JVM_SETTINGS}" ]
-then
-    verbose "KYLIN_JVM_SETTINGS is ${KYLIN_JVM_SETTINGS}"
-    KYLIN_EXTRA_START_OPTS="${KYLIN_JVM_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
-else
-    verbose "KYLIN_JVM_SETTINGS is not set, using default jvm settings: ${KYLIN_JVM_SETTINGS}"
-fi
-
-if [ ! -z "${KYLIN_DEBUG_SETTINGS}" ]
-then
-    verbose "KYLIN_DEBUG_SETTINGS is ${KYLIN_DEBUG_SETTINGS}"
-    KYLIN_EXTRA_START_OPTS="${KYLIN_DEBUG_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
-else
-    verbose "KYLIN_DEBUG_SETTINGS is not set, will not enable remote debuging"
-fi
-
-if [ ! -z "${KYLIN_LD_LIBRARY_SETTINGS}" ]
-then
-    verbose "KYLIN_LD_LIBRARY_SETTINGS is ${KYLIN_LD_LIBRARY_SETTINGS}"
-    KYLIN_EXTRA_START_OPTS="${KYLIN_LD_LIBRARY_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
-else
-    verbose "KYLIN_LD_LIBRARY_SETTINGS is not set, it is okay unless you want to specify your own native path"
-fi
diff --git a/kubernetes/config/production/kylin-all/setenv.sh b/kubernetes/config/production/kylin-all/setenv.sh
deleted file mode 100644
index fa88769..0000000
--- a/kubernetes/config/production/kylin-all/setenv.sh
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# source me
-
-# (if your're deploying KYLIN on a powerful server and want to replace the default conservative settings)
-# uncomment following to for it to take effect
-export KYLIN_JVM_SETTINGS="-Xms1024M -Xmx2048M -Xss1024K -XX:MaxPermSize=512M -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.%p -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=64M"
-
-# Newer versions of glibc use an arena memory allocator that causes virtual
-# memory usage to explode. Tune the variable down to prevent vmem explosion.
-# See HADOOP-7154.
-export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
-
-# export KYLIN_JVM_SETTINGS="-Xms16g -Xmx16g -XX:MaxPermSize=512m -XX:NewSize=3g -XX:MaxNewSize=3g -XX:SurvivorRatio=4 -XX:+CMSClassUnloadingEnabled -XX:+CMSParallelRemarkEnabled -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:+DisableExplicitGC -XX:+HeapDumpOnOutOfMemoryError -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.%p -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFi [...]
-
-# uncomment following to for it to take effect(the values need adjusting to fit your env)
-# export KYLIN_DEBUG_SETTINGS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
-
-# when running on HDP, try to determine the software stack version adn set hdp.version JVM property 
-if [[ -d "/usr/hdp/current/hadoop-client" ]]
-then
-   export KYLIN_EXTRA_START_OPTS="-Dhdp.version=`ls -l /usr/hdp/current/hadoop-client | awk -F'/' '{print $8}'`"
-   # attempt to locate JVM native libraries and set corresponding property
-   if [[ -d "/usr/hdp/current/hadoop-client/lib/native" ]]
-   then
-      export KYLIN_LD_LIBRARY_SETTINGS="-Djava.library.path=/usr/hdp/current/hadoop-client/lib/native"
-   fi
-else
-   export KYLIN_EXTRA_START_OPTS=""
-   # uncomment the following line to set JVM native library path, the values need to reflect your environment and hardware architecture
-   # export KYLIN_LD_LIBRARY_SETTINGS="-Djava.library.path=/apache/hadoop/lib/native/Linux-amd64-64"
-fi
-
-if [ ! -z "${KYLIN_JVM_SETTINGS}" ]
-then
-    verbose "KYLIN_JVM_SETTINGS is ${KYLIN_JVM_SETTINGS}"
-    KYLIN_EXTRA_START_OPTS="${KYLIN_JVM_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
-else
-    verbose "KYLIN_JVM_SETTINGS is not set, using default jvm settings: ${KYLIN_JVM_SETTINGS}"
-fi
-
-if [ ! -z "${KYLIN_DEBUG_SETTINGS}" ]
-then
-    verbose "KYLIN_DEBUG_SETTINGS is ${KYLIN_DEBUG_SETTINGS}"
-    KYLIN_EXTRA_START_OPTS="${KYLIN_DEBUG_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
-else
-    verbose "KYLIN_DEBUG_SETTINGS is not set, will not enable remote debuging"
-fi
-
-if [ ! -z "${KYLIN_LD_LIBRARY_SETTINGS}" ]
-then
-    verbose "KYLIN_LD_LIBRARY_SETTINGS is ${KYLIN_LD_LIBRARY_SETTINGS}"
-    KYLIN_EXTRA_START_OPTS="${KYLIN_LD_LIBRARY_SETTINGS} ${KYLIN_EXTRA_START_OPTS}"
-else
-    verbose "KYLIN_LD_LIBRARY_SETTINGS is not set, it is okay unless you want to specify your own native path"
-fi
diff --git a/kubernetes/config/production/kylin-job/kylin.properties b/kubernetes/config/production/kylin-job/kylin.properties
index 43e67e2..9aff89a 100644
--- a/kubernetes/config/production/kylin-job/kylin.properties
+++ b/kubernetes/config/production/kylin-job/kylin.properties
@@ -15,23 +15,21 @@
 # limitations under the License.
 #
 
-
-kylin.cache.memcached.hosts=10.1.2.42:11211
+## Please modify the value to correct value, such as "cache-svc.kylin-example.svc.cluster.local:11211"
+kylin.cache.memcached.hosts=${MEMCACHED_HOST}:11211
 kylin.query.cache-signature-enabled=true
 kylin.query.lazy-query-enabled=true
 kylin.metrics.memcached.enabled=true
-kylin.query.segment-cache-enabled=true
-
+kylin.query.segment-cache-enabled=false
 
-kylin.metrics.monitor-enabled=true
-kylin.metrics.reporter-query-enabled=true
-kylin.metrics.reporter-job-enabled=true
+kylin.metrics.monitor-enabled=false
+kylin.metrics.reporter-query-enabled=false
+kylin.metrics.reporter-job-enabled=false
 
 # The below commented values will effect as default settings
 # Uncomment and override them if necessary
 
 
-
 #
 #### METADATA | ENV ###
 #
@@ -133,7 +131,7 @@ kylin.server.mode=job
 kylin.storage.hbase.table-name-prefix=K8S_
 #
 ## The namespace for hbase storage
-kylin.storage.hbase.namespace=lacus
+kylin.storage.hbase.namespace=DEFAULT
 #
 ## Compression codec for htable, valid value [none, snappy, lzo, gzip, lz4]
 #kylin.storage.hbase.compression-codec=none
diff --git a/kubernetes/config/production/kylin-job/setenv.sh b/kubernetes/config/production/kylin-job/setenv.sh
index fa88769..ab2d558 100644
--- a/kubernetes/config/production/kylin-job/setenv.sh
+++ b/kubernetes/config/production/kylin-job/setenv.sh
@@ -21,7 +21,7 @@
 
 # (if your're deploying KYLIN on a powerful server and want to replace the default conservative settings)
 # uncomment following to for it to take effect
-export KYLIN_JVM_SETTINGS="-Xms1024M -Xmx2048M -Xss1024K -XX:MaxPermSize=512M -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.%p -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=64M"
+export KYLIN_JVM_SETTINGS="-Xms1024M -Xmx16g -Xss1024K -XX:MaxPermSize=512M -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.%p -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=64M"
 
 # Newer versions of glibc use an arena memory allocator that causes virtual
 # memory usage to explode. Tune the variable down to prevent vmem explosion.
diff --git a/kubernetes/config/production/kylin-more/applicationContext.xml b/kubernetes/config/production/kylin-more/applicationContext.xml
index 5397044..94f22b6 100644
--- a/kubernetes/config/production/kylin-more/applicationContext.xml
+++ b/kubernetes/config/production/kylin-more/applicationContext.xml
@@ -106,6 +106,7 @@
         -->
 
 
+        <!-- comment following code to discard external query cache feature -->
         <bean id="ehcache" class="org.springframework.cache.ehcache.EhCacheManagerFactoryBean"
               p:configLocation="classpath:ehcache-test.xml" p:shared="true"/>
 
diff --git a/kubernetes/config/production/kylin-query/kylin.properties b/kubernetes/config/production/kylin-query/kylin.properties
index 1c5646f..aecd6dd 100644
--- a/kubernetes/config/production/kylin-query/kylin.properties
+++ b/kubernetes/config/production/kylin-query/kylin.properties
@@ -15,15 +15,16 @@
 # limitations under the License.
 #
 
-kylin.cache.memcached.hosts=10.1.2.42:11211
+## Please modify the value to correct value, such as "cache-svc.kylin-example.svc.cluster.local:11211"
+kylin.cache.memcached.hosts=${MEMCACHED_HOST}:11211
 kylin.query.cache-signature-enabled=true
 kylin.query.lazy-query-enabled=true
 kylin.metrics.memcached.enabled=true
-kylin.query.segment-cache-enabled=true
+kylin.query.segment-cache-enabled=false
 
-kylin.metrics.monitor-enabled=true
-kylin.metrics.reporter-query-enabled=true
-kylin.metrics.reporter-job-enabled=true
+kylin.metrics.monitor-enabled=false
+kylin.metrics.reporter-query-enabled=false
+kylin.metrics.reporter-job-enabled=false
 # The below commented values will effect as default settings
 # Uncomment and override them if necessary
 
diff --git a/kubernetes/config/production/kylin-query/setenv.sh b/kubernetes/config/production/kylin-query/setenv.sh
index fa88769..ab2d558 100644
--- a/kubernetes/config/production/kylin-query/setenv.sh
+++ b/kubernetes/config/production/kylin-query/setenv.sh
@@ -21,7 +21,7 @@
 
 # (if your're deploying KYLIN on a powerful server and want to replace the default conservative settings)
 # uncomment following to for it to take effect
-export KYLIN_JVM_SETTINGS="-Xms1024M -Xmx2048M -Xss1024K -XX:MaxPermSize=512M -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.%p -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=64M"
+export KYLIN_JVM_SETTINGS="-Xms1024M -Xmx16g -Xss1024K -XX:MaxPermSize=512M -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.%p -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=64M"
 
 # Newer versions of glibc use an arena memory allocator that causes virtual
 # memory usage to explode. Tune the variable down to prevent vmem explosion.
diff --git a/kubernetes/config/production/streaming-receiver/setenv.sh b/kubernetes/config/production/streaming-receiver/setenv.sh
index fa88769..ab2d558 100644
--- a/kubernetes/config/production/streaming-receiver/setenv.sh
+++ b/kubernetes/config/production/streaming-receiver/setenv.sh
@@ -21,7 +21,7 @@
 
 # (if your're deploying KYLIN on a powerful server and want to replace the default conservative settings)
 # uncomment following to for it to take effect
-export KYLIN_JVM_SETTINGS="-Xms1024M -Xmx2048M -Xss1024K -XX:MaxPermSize=512M -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.%p -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=64M"
+export KYLIN_JVM_SETTINGS="-Xms1024M -Xmx16g -Xss1024K -XX:MaxPermSize=512M -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$KYLIN_HOME/logs/kylin.gc.%p -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=64M"
 
 # Newer versions of glibc use an arena memory allocator that causes virtual
 # memory usage to explode. Tune the variable down to prevent vmem explosion.
diff --git a/kubernetes/config/production/tomcat/context.xml b/kubernetes/config/production/tomcat/context.xml
new file mode 100644
index 0000000..943946c
--- /dev/null
+++ b/kubernetes/config/production/tomcat/context.xml
@@ -0,0 +1,48 @@
+<?xml version='1.0' encoding='utf-8'?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~  
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~  
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+<!-- The contents of this file will be loaded for each web application -->
+<Context allowLinking="true">
+
+    <!-- Default set of monitored resources -->
+    <WatchedResource>WEB-INF/web.xml</WatchedResource>
+
+    <!-- Uncomment this to disable session persistence across Tomcat restarts -->
+    <!--
+    <Manager pathname="" />
+    -->
+
+    <!-- Uncomment this to enable Comet connection tacking (provides events
+         on session expiration as well as webapp lifecycle) -->
+    <!--
+    <Valve className="org.apache.catalina.valves.CometConnectionManagerValve" />
+    -->
+
+    <Loader loaderClass="org.apache.kylin.ext.CustomizedWebappClassloader"/>
+
+
+    <!-- Please change here : enable tomcat session sharing -->
+    <Manager className="de.javakaffee.web.msm.MemcachedBackupSessionManager"
+             memcachedNodes="n1:{kylin-memcached-server}:11211,n2:{kylin-memcached-server}:11211..."
+             failoverNodes="n1"
+             storageKeyPrefix="context"
+             requestUriIgnorePattern=".*\.(ico|png|gif|jpg|css|js)$"
+    />
+
+
+</Context>
diff --git a/kubernetes/config/production/tomcat/server.xml b/kubernetes/config/production/tomcat/server.xml
new file mode 100644
index 0000000..c626bc4
--- /dev/null
+++ b/kubernetes/config/production/tomcat/server.xml
@@ -0,0 +1,142 @@
+<?xml version='1.0' encoding='utf-8'?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<!-- Note:  A "Server" is not itself a "Container", so you may not
+     define subcomponents such as "Valves" at this level.
+     Documentation at /docs/config/server.html
+ -->
+<Server port="9005" shutdown="SHUTDOWN">
+    <!-- Security listener. Documentation at /docs/config/listeners.html
+    <Listener className="org.apache.catalina.security.SecurityListener" />
+    -->
+    <!--APR library loader. Documentation at /docs/apr.html -->
+    <Listener className="org.apache.catalina.core.AprLifecycleListener" SSLEngine="on" />
+    <!--Initialize Jasper prior to webapps are loaded. Documentation at /docs/jasper-howto.html -->
+    <Listener className="org.apache.catalina.core.JasperListener" />
+    <!-- Prevent memory leaks due to use of particular java/javax APIs-->
+    <Listener className="org.apache.catalina.core.JreMemoryLeakPreventionListener" />
+    <Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener" />
+    <Listener className="org.apache.catalina.core.ThreadLocalLeakPreventionListener" />
+
+    <!-- Global JNDI resources
+         Documentation at /docs/jndi-resources-howto.html
+    -->
+    <GlobalNamingResources>
+        <!-- Editable user database that can also be used by
+             UserDatabaseRealm to authenticate users
+        -->
+        <Resource name="UserDatabase" auth="Container"
+                  type="org.apache.catalina.UserDatabase"
+                  description="User database that can be updated and saved"
+                  factory="org.apache.catalina.users.MemoryUserDatabaseFactory"
+                  pathname="conf/tomcat-users.xml" />
+    </GlobalNamingResources>
+
+    <!-- A "Service" is a collection of one or more "Connectors" that share
+         a single "Container" Note:  A "Service" is not itself a "Container",
+         so you may not define subcomponents such as "Valves" at this level.
+         Documentation at /docs/config/service.html
+     -->
+    <Service name="Catalina">
+
+        <!--The connectors can use a shared executor, you can define one or more named thread pools-->
+        <!--
+        <Executor name="tomcatThreadPool" namePrefix="catalina-exec-"
+            maxThreads="150" minSpareThreads="4"/>
+        -->
+
+
+        <!-- A "Connector" represents an endpoint by which requests are received
+             and responses are returned. Documentation at :
+             Java HTTP Connector: /docs/config/http.html (blocking & non-blocking)
+             Java AJP  Connector: /docs/config/ajp.html
+             APR (HTTP/AJP) Connector: /docs/apr.html
+             Define a non-SSL HTTP/1.1 Connector on port 8080
+        -->
+        <Connector port="7070" protocol="HTTP/1.1"
+                   connectionTimeout="20000"
+                   redirectPort="7443"
+                   compression="on"
+                   compressionMinSize="2048"
+                   noCompressionUserAgents="gozilla,traviata"
+                   compressableMimeType="text/html,text/xml,text/javascript,application/javascript,application/json,text/css,text/plain"
+                   URIEncoding="UTF-8"
+        />
+        <!-- A "Connector" using the shared thread pool-->
+        <!-- Define a SSL HTTP/1.1 Connector on port 8443
+             This connector uses the BIO implementation that requires the JSSE
+             style configuration. When using the APR/native implementation, the
+             OpenSSL style configuration is required as described in the APR/native
+             documentation -->
+        <Connector port="7443" protocol="org.apache.coyote.http11.Http11Protocol"
+                   maxThreads="150" SSLEnabled="true" scheme="https" secure="true"
+                   keystoreFile="conf/.keystore" keystorePass="changeit"
+                   clientAuth="false" sslProtocol="TLS" />
+
+        <!-- Define an AJP 1.3 Connector on port 8009 -->
+        <Connector port="9009" protocol="AJP/1.3" redirectPort="9443" />
+
+
+        <!-- An Engine represents the entry point (within Catalina) that processes
+             every request.  The Engine implementation for Tomcat stand alone
+             analyzes the HTTP headers included with the request, and passes them
+             on to the appropriate Host (virtual host).
+             Documentation at /docs/config/engine.html -->
+
+        <!-- You should set jvmRoute to support load-balancing via AJP ie :
+        <Engine name="Catalina" defaultHost="localhost" jvmRoute="jvm1">
+        -->
+        <Engine name="Catalina" defaultHost="localhost">
+
+            <!--For clustering, please take a look at documentation at:
+                /docs/cluster-howto.html  (simple how to)
+                /docs/config/cluster.html (reference documentation) -->
+            <!--
+            <Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"/>
+            -->
+
+            <!-- Use the LockOutRealm to prevent attempts to guess user passwords
+                 via a brute-force attack -->
+            <Realm className="org.apache.catalina.realm.LockOutRealm">
+                <!-- This Realm uses the UserDatabase configured in the global JNDI
+                     resources under the key "UserDatabase".  Any edits
+                     that are performed against this UserDatabase are immediately
+                     available for use by the Realm.  -->
+                <Realm className="org.apache.catalina.realm.UserDatabaseRealm"
+                       resourceName="UserDatabase"/>
+            </Realm>
+
+            <Host name="localhost"  appBase="webapps"
+                  unpackWARs="true" autoDeploy="true">
+
+                <!-- SingleSignOn valve, share authentication between web applications
+                     Documentation at: /docs/config/valve.html -->
+                <!--
+                <Valve className="org.apache.catalina.authenticator.SingleSignOn" />
+                -->
+
+                <!-- Access log processes all example.
+                     Documentation at: /docs/config/valve.html
+                     Note: The pattern used is equivalent to using pattern="common" -->
+                <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
+                       prefix="localhost_access_log." suffix=".txt"
+                       pattern="%h %l %u %t &quot;%r&quot; %s %b %D %{User-Agent}i" />
+
+            </Host>
+        </Engine>
+    </Service>
+</Server>
diff --git a/kubernetes/config/quickstart/hadoop/core-site.xml b/kubernetes/config/quickstart/hadoop/core-site.xml
index 9108ad8..6b981ee 100644
--- a/kubernetes/config/quickstart/hadoop/core-site.xml
+++ b/kubernetes/config/quickstart/hadoop/core-site.xml
@@ -1,4 +1,19 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
 
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 <configuration>
 </configuration>
diff --git a/kubernetes/config/quickstart/hadoop/hbase-site.xml b/kubernetes/config/quickstart/hadoop/hbase-site.xml
index 9108ad8..6b981ee 100644
--- a/kubernetes/config/quickstart/hadoop/hbase-site.xml
+++ b/kubernetes/config/quickstart/hadoop/hbase-site.xml
@@ -1,4 +1,19 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
 
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 <configuration>
 </configuration>
diff --git a/kubernetes/config/quickstart/hadoop/hdfs-site.xml b/kubernetes/config/quickstart/hadoop/hdfs-site.xml
index 9108ad8..6b981ee 100644
--- a/kubernetes/config/quickstart/hadoop/hdfs-site.xml
+++ b/kubernetes/config/quickstart/hadoop/hdfs-site.xml
@@ -1,4 +1,19 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
 
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 <configuration>
 </configuration>
diff --git a/kubernetes/config/quickstart/hadoop/hive-site.xml b/kubernetes/config/quickstart/hadoop/hive-site.xml
index 9108ad8..6b981ee 100644
--- a/kubernetes/config/quickstart/hadoop/hive-site.xml
+++ b/kubernetes/config/quickstart/hadoop/hive-site.xml
@@ -1,4 +1,19 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
 
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 <configuration>
 </configuration>
diff --git a/kubernetes/config/quickstart/hadoop/mapred-site.xml b/kubernetes/config/quickstart/hadoop/mapred-site.xml
index 9108ad8..6b981ee 100644
--- a/kubernetes/config/quickstart/hadoop/mapred-site.xml
+++ b/kubernetes/config/quickstart/hadoop/mapred-site.xml
@@ -1,4 +1,19 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
 
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 <configuration>
 </configuration>
diff --git a/kubernetes/config/quickstart/hadoop/yarn-site.xml b/kubernetes/config/quickstart/hadoop/yarn-site.xml
index 9108ad8..6b981ee 100644
--- a/kubernetes/config/quickstart/hadoop/yarn-site.xml
+++ b/kubernetes/config/quickstart/hadoop/yarn-site.xml
@@ -1,4 +1,19 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
 
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 <configuration>
 </configuration>
diff --git a/kubernetes/docker/README b/kubernetes/docker/README
deleted file mode 100644
index 58528cc..0000000
--- a/kubernetes/docker/README
+++ /dev/null
@@ -1 +0,0 @@
-Please check README under `hadoop-client` and `kylin-client` for detail.
\ No newline at end of file
diff --git a/kubernetes/docker/README.md b/kubernetes/docker/README.md
new file mode 100644
index 0000000..2b03530
--- /dev/null
+++ b/kubernetes/docker/README.md
@@ -0,0 +1,10 @@
+> Please check README under `hadoop-client` and `kylin-client` for detail.
+
+After kylin-client image is ready, you can use `docker` command to save or push the image.
+
+```shell
+# Save and share it with others.
+docker save -o kylin-cdh.tar kylin-client:3.0.1-cdh57
+# Or push it to registry
+docker push apachekylin/kylin-client:3.0.1-cdh57
+```
\ No newline at end of file
diff --git a/kubernetes/docker/hadoop-client/CDH57/Dockerfile b/kubernetes/docker/hadoop-client/CDH57/Dockerfile
index 1f5d203..a3df814 100644
--- a/kubernetes/docker/hadoop-client/CDH57/Dockerfile
+++ b/kubernetes/docker/hadoop-client/CDH57/Dockerfile
@@ -1,3 +1,20 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
 FROM centos:7.3.1611
 
 MAINTAINER Apache Kylin
diff --git a/kubernetes/docker/hadoop-client/CDH57/build-image.sh b/kubernetes/docker/hadoop-client/CDH57/build-image.sh
index 5b51657..22c3122 100644
--- a/kubernetes/docker/hadoop-client/CDH57/build-image.sh
+++ b/kubernetes/docker/hadoop-client/CDH57/build-image.sh
@@ -1 +1,17 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 docker build -t hadoop-client:cdh57 .
\ No newline at end of file
diff --git a/kubernetes/docker/hadoop-client/README.MD b/kubernetes/docker/hadoop-client/README.MD
deleted file mode 100644
index e977d4a..0000000
--- a/kubernetes/docker/hadoop-client/README.MD
+++ /dev/null
@@ -1,9 +0,0 @@
-## Background
-What is hadoop-client docker images? 
-And why we need this?
-
-## Build Step
-1. Place Spark binary(*spark-2.3.2-bin-hadoop2.7.tgz*) into dir `provided-binary`.
-2. Depend on which Hadoop Distribution you are use, prepare `Dockerfile`, please take `CDH-5.7` as an example
-3. Run `build-image.sh` to build image.
-
diff --git a/kubernetes/docker/hadoop-client/README.md b/kubernetes/docker/hadoop-client/README.md
new file mode 100644
index 0000000..b2d4c2d
--- /dev/null
+++ b/kubernetes/docker/hadoop-client/README.md
@@ -0,0 +1,24 @@
+## Background
+### What is the hadoop-client docker image and why do we need it? 
+As we all know, the node where you want to deploy Kylin must provide the Hadoop 
+dependencies (jars and configuration files); these dependencies give you access
+ to Hadoop services, such as HDFS, HBase, and Hive, which are needed by Apache Kylin. 
+Unfortunately, each Hadoop distribution (CDH or HDP etc.) has its own specific jars. So, we 
+can provide a specific image for each Hadoop distribution, which will make the image management task
+easier. This has the following two benefits:
+1. Someone who has better knowledge of Hadoop can do this work, and let Kylin 
+ users build their Kylin image based on the provided hadoop-client image.
+2. Upgrading Kylin will be much easier.
+
+### Build step for CDH5.7
+1. Working directory is `docker/hadoop-client/CDH57`.
+2. Place Spark binary(spark-2.3.2-bin-hadoop2.7.tgz) into dir `provided-binary` directory.
+3. Run `build-image.sh` to build image.
+
+### Build step for vanilla Hadoop
+1. Working directory is `docker/hadoop-client/apache-hadoop2.7`.
+2. Download required hadoop binary files and put them into `hadoop-binary` directory.
+3. Run `build-image.sh` to build image.
+
+> If you are using another hadoop distribution, please consider referring to the provided `Dockerfile` and writing your own.
+
diff --git a/kubernetes/docker/hadoop-client/apache-hadoop2.7/Dockerfile b/kubernetes/docker/hadoop-client/apache-hadoop2.7/Dockerfile
index d45f1b9..877df7a 100644
--- a/kubernetes/docker/hadoop-client/apache-hadoop2.7/Dockerfile
+++ b/kubernetes/docker/hadoop-client/apache-hadoop2.7/Dockerfile
@@ -1,3 +1,20 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
 FROM centos:7.3.1611
 
 MAINTAINER Apache Kylin
@@ -29,40 +46,35 @@ ENV JAVA_HOME /etc/alternatives/jre
 
 # install hadoop
 ENV HADOOP_HOME=$APACHE_HOME/hadoop
-ADD hdp-clients/hadoop-${HADOOP_VERSION}.tar.gz $APACHE_HOME
+ADD hadoop-binary/hadoop-${HADOOP_VERSION}.tar.gz $APACHE_HOME
 RUN set -x \
     && ln -s $APACHE_HOME/hadoop-${HADOOP_VERSION} $HADOOP_HOME
 
 #install hive
 ENV HIVE_HOME=$APACHE_HOME/hive 
-ADD hdp-clients/apache-hive-${HIVE_VERSION}.tar.gz $APACHE_HOME
+ADD hadoop-binary/apache-hive-${HIVE_VERSION}.tar.gz $APACHE_HOME
 RUN set -x \
     && ln -s $APACHE_HOME/apache-hive-${HIVE_VERSION} $HIVE_HOME
 
 #install hbase
 ENV HBASE_HOME=$APACHE_HOME/hbase
-ADD hdp-clients/hbase-${HBASE_VERSION}.tar.gz $APACHE_HOME
+ADD hadoop-binary/hbase-${HBASE_VERSION}.tar.gz $APACHE_HOME
 RUN set -x \
     && ln -s $APACHE_HOME/hbase-${HBASE_VERSION} $HBASE_HOME
 
 #install spark
 ENV SPARK_HOME=$APACHE_HOME/spark
-ADD hdp-clients/spark-${SPARK_VERSION}.tar.gz $APACHE_HOME
+ADD hadoop-binary/spark-${SPARK_VERSION}.tar.gz $APACHE_HOME
 RUN set -x \
     && ln -s $APACHE_HOME/spark-${SPARK_VERSION} $SPARK_HOME
 
 #install zk
 ENV ZK_HOME=$APACHE_HOME/zookeeper
-ADD hdp-clients/zookeeper-${ZK_VERSION}.tar.gz $APACHE_HOME
+ADD hadoop-binary/zookeeper-${ZK_VERSION}.tar.gz $APACHE_HOME
 RUN set -x \
     && ln -s $APACHE_HOME/zookeeper-${ZK_VERSION} $ZK_HOME
 
 ENV PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HIVE_HOME/bin:$HBASE_HOME/bin:$ZK_HOME/bin
 
-COPY conf/hadoop $HADOOP_HOME/etc/hadoop
-COPY conf/hbase $HBASE_HOME/conf
-COPY conf/hive $HIVE_HOME/conf
-COPY conf/krb5.conf /etc/krb5.conf
-
 # Cleanup
 RUN rm -rf /tmp/*
diff --git a/kubernetes/docker/hadoop-client/apache-hadoop2.7/build-image.sh b/kubernetes/docker/hadoop-client/apache-hadoop2.7/build-image.sh
index 3f8ff8d..c6f14d2 100644
--- a/kubernetes/docker/hadoop-client/apache-hadoop2.7/build-image.sh
+++ b/kubernetes/docker/hadoop-client/apache-hadoop2.7/build-image.sh
@@ -1 +1,17 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 docker build -t hadoop-client:apache-hadoop2.7 .
\ No newline at end of file
diff --git a/kubernetes/docker/kylin-client/Dockerfile b/kubernetes/docker/kylin-client/Dockerfile
index 6ab5d2a..ef30645 100644
--- a/kubernetes/docker/kylin-client/Dockerfile
+++ b/kubernetes/docker/kylin-client/Dockerfile
@@ -1,4 +1,24 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
 FROM hadoop-client:cdh57
+
+MAINTAINER Apache Kylin
+
 ARG USER=apache_kylin
 ARG USER_HOME=/home/${USER}
 ARG KYLIN_VERSION=apache-kylin-3.0.1-bin-cdh57
@@ -11,8 +31,6 @@ ARG SPARK_CONF_HOME=$KYLIN_HOME/hadoop-conf
 ENV SPARK_HOME /opt/spark-2.3.2-bin-hadoop2.7
 ENV KYLIN_HOME=$USER_HOME/kylin
 
-MAINTAINER Apache Kylin
-
 WORKDIR /tmp
 
 # install system tools
@@ -38,7 +56,7 @@ COPY --chown=$USER:$USER $KYLIN_VERSION $KYLIN_HOME
 
 RUN set -x \
     && unzip -qq $KYLIN_HOME/tomcat/webapps/kylin.war -d $KYLIN_HOME/tomcat/webapps/kylin \
-#    && chown -R $USER:$USER $KYLIN_HOME/tomcat/webapps/kylin \
+    && chown -R $USER:$USER $KYLIN_HOME/tomcat/webapps/kylin \
     && rm $KYLIN_HOME/tomcat/webapps/kylin.war \
     && mkdir $SPARK_CONF_HOME \
     && ln -s $HADOOP_CONF_HOME/core-site.xml $SPARK_CONF_HOME/core-site.xml \
@@ -60,5 +78,5 @@ RUN /usr/bin/crontab -u $USER /tmp/crontab.txt \
     && chmod 755 $TOOL_HOME/*
 EXPOSE 7070
 
-USER root
-CMD ["sh", "-c", "$TOOL_HOME/bootstrap.sh server -d"]
+USER $USER
+CMD ["sh", "-c", "$TOOL_HOME/bootstrap.sh server -d"]
\ No newline at end of file
diff --git a/kubernetes/docker/kylin-client/README b/kubernetes/docker/kylin-client/README
deleted file mode 100644
index bfc5a84..0000000
--- a/kubernetes/docker/kylin-client/README
+++ /dev/null
@@ -1,8 +0,0 @@
-## Background
-What is kylin-client docker images?
-And why we need this?
-
-## Build Step
-1. Place Kylin binary(*spark-2.3.2-bin-hadoop2.7.tgz*) and uncompress it into current dir.
-2. Modify `Dockerfile` , change the value of `KYLIN_VERSION` and name of base image(hadoop-client).
-3. Run `build-image.sh` to build image.
\ No newline at end of file
diff --git a/kubernetes/docker/kylin-client/README.md b/kubernetes/docker/kylin-client/README.md
new file mode 100644
index 0000000..c76e822
--- /dev/null
+++ b/kubernetes/docker/kylin-client/README.md
@@ -0,0 +1,9 @@
+## Background
+What is the kylin-client docker image? And why do we need it?
+kylin-client is a docker image based on hadoop-client; it provides the 
+flexibility to upgrade Apache Kylin.
+
+## Build Step
+1. Place Kylin binary(*apache-kylin-3.0.1-bin-cdh57*) and uncompress it into current dir.
+2. Modify `Dockerfile`, change the value of `KYLIN_VERSION` and the name of the base image (hadoop-client).
+3. Run `build-image.sh` to build image.
\ No newline at end of file
diff --git a/kubernetes/docker/kylin-client/bin/bootstrap.sh b/kubernetes/docker/kylin-client/bin/bootstrap.sh
index 911f845..a38a0b2 100755
--- a/kubernetes/docker/kylin-client/bin/bootstrap.sh
+++ b/kubernetes/docker/kylin-client/bin/bootstrap.sh
@@ -1,5 +1,22 @@
 #!/bin/bash
 
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
 # enable cron job
 sudo crond -i -p
 
diff --git a/kubernetes/docker/kylin-client/bin/check-liveness.sh b/kubernetes/docker/kylin-client/bin/check-liveness.sh
index cc1f786..b7b9877 100644
--- a/kubernetes/docker/kylin-client/bin/check-liveness.sh
+++ b/kubernetes/docker/kylin-client/bin/check-liveness.sh
@@ -1 +1,18 @@
-#!/bin/bash
\ No newline at end of file
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
\ No newline at end of file
diff --git a/kubernetes/docker/kylin-client/bin/check-readiness.sh b/kubernetes/docker/kylin-client/bin/check-readiness.sh
index a9bf588..b7b9877 100644
--- a/kubernetes/docker/kylin-client/bin/check-readiness.sh
+++ b/kubernetes/docker/kylin-client/bin/check-readiness.sh
@@ -1 +1,18 @@
 #!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
\ No newline at end of file
diff --git a/kubernetes/docker/kylin-client/bin/clean-log.sh b/kubernetes/docker/kylin-client/bin/clean-log.sh
index 5b76d68..7a7be87 100644
--- a/kubernetes/docker/kylin-client/bin/clean-log.sh
+++ b/kubernetes/docker/kylin-client/bin/clean-log.sh
@@ -1,6 +1,23 @@
 #!/bin/bash
 
-export KYLIN_HOME=/home/b_kylin/kylin2
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+export KYLIN_HOME=/home/apache_kylin/kylin
 
 # Rotate kylin out
 timestamp=`date +%Y-%m-%d`
diff --git a/kubernetes/docker/kylin-client/build-image.sh b/kubernetes/docker/kylin-client/build-image.sh
index daa9743..0032b81 100644
--- a/kubernetes/docker/kylin-client/build-image.sh
+++ b/kubernetes/docker/kylin-client/build-image.sh
@@ -1,2 +1,20 @@
-docker build -t kylin-client:3.0.1-cdh57 .
-docker save -o kylin-client-3.0.1-cdh57.tar kylin-client:3.0.1-cdh57
\ No newline at end of file
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+docker build -t kylin-client:3.0.1-cdh57 .
\ No newline at end of file
diff --git a/kubernetes/docker/upload.sh b/kubernetes/docker/upload.sh
deleted file mode 100644
index 9c2c57d..0000000
--- a/kubernetes/docker/upload.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-docker save -o kylin-cdh.tar kylin-client:3.0.1-cdh57
-scp kylin-cdh.tar ubuntu@10.1.2.41/home/ubuntu/
\ No newline at end of file
diff --git a/kubernetes/example/README b/kubernetes/example/README
deleted file mode 100644
index 44416d1..0000000
--- a/kubernetes/example/README
+++ /dev/null
@@ -1,10 +0,0 @@
-## Example
-
-> This dir provided a deployment template with all required files.
-
-### Pre-requirement
-
-- A healthy CDH 5.7 cluster
-- A healthy on-premise K8s cluster
-
-### Step
\ No newline at end of file
diff --git a/kubernetes/example/deployment/kylin-job/kylin-job-service.yaml b/kubernetes/example/deployment/kylin-job/kylin-job-service.yaml
deleted file mode 100644
index bbcee62..0000000
--- a/kubernetes/example/deployment/kylin-job/kylin-job-service.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: kylin-job-svc
-  namespace: kylin-example
-spec:
-  ports:
-  - name: http
-    port: 80
-    targetPort: 7070
-  - name: https
-    port: 443
-    targetPort: 7443
-  selector:
-    app: kylin
-    role: job
-  type: LoadBalancer
\ No newline at end of file
diff --git a/kubernetes/example/deployment/kylin-query/kylin-query-service.yaml b/kubernetes/example/deployment/kylin-query/kylin-query-service.yaml
deleted file mode 100644
index bf66fd7..0000000
--- a/kubernetes/example/deployment/kylin-query/kylin-query-service.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: kylin-query-svc
-  namespace: kylin-example
-spec:
-  ports:
-  - name: http
-    port: 80
-    targetPort: 7070
-  - name: https
-    port: 443
-    targetPort: 7443
-  selector:
-    app: kylin
-    role: query
-  type: LoadBalancer
\ No newline at end of file
diff --git a/kubernetes/example/deployment/memcached/memcached-service.yaml b/kubernetes/example/deployment/memcached/memcached-service.yaml
deleted file mode 100644
index 6ea50e3..0000000
--- a/kubernetes/example/deployment/memcached/memcached-service.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: cache-svc
-  namespace: kylin-example
-spec:
-  clusterIP: None
-  selector: 
-    app: kylin-memcached
-    role: cache
-  ports:
-  - port: 11211
-    targetPort: 11211
\ No newline at end of file
diff --git a/kubernetes/example/deployment/memcached/memcached-statefulset.yaml b/kubernetes/example/deployment/memcached/memcached-statefulset.yaml
deleted file mode 100644
index ad58811..0000000
--- a/kubernetes/example/deployment/memcached/memcached-statefulset.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: kylin-memcached
-  namespace: kylin-example
-spec:
-  serviceName: cache-svc
-  replicas: 1
-  selector:
-    matchLabels:
-      app: kylin-memcached
-      role: cache
-  template:
-    metadata:
-      labels:
-        app: kylin-memcached
-        role: cache
-    spec:
-      containers:
-      - image: memcached:1.4.39
-        name: memcached
-        ports:
-        - containerPort: 11211
-        args:
-        - "-m 20480"
-        resources:
-          limits:
-            cpu: 1
-            memory: 1Gi
-          requests:
-            cpu: 1
-            memory: 1Gi
-        livenessProbe:
-          tcpSocket:
-            port: 11211
-          initialDelaySeconds: 30
-          timeoutSeconds: 5
-        readinessProbe:
-          tcpSocket:
-            port: 11211
-          initialDelaySeconds: 5
-          timeoutSeconds: 1
\ No newline at end of file
diff --git a/kubernetes/template/README b/kubernetes/template/README
deleted file mode 100644
index e69de29..0000000
diff --git a/kubernetes/template/production/check-deploy.sh b/kubernetes/template/production/check-deploy.sh
deleted file mode 100644
index 529cf36..0000000
--- a/kubernetes/template/production/check-deploy.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-## Check status
-kubectl get statefulset -n kylin-prod
-
-kubectl get pod -n kylin-prod
-
-kubectl get service -n kylin-prod
-
-## Check detail
-kubectl describe pod -n kylin-prod
\ No newline at end of file
diff --git a/kubernetes/template/production/cleanup.sh b/kubernetes/template/production/cleanup.sh
index 1b72b64..b0f357f 100644
--- a/kubernetes/template/production/cleanup.sh
+++ b/kubernetes/template/production/cleanup.sh
@@ -1,6 +1,21 @@
-kubectl delete -f deployment/memcached/memcached-service.yaml
-kubectl delete -f deployment/memcached/memcached-statefulset.yaml
+#!/bin/bash
 
-kubectl delete -f deployment/kylin/kylin-service.yaml
-kubectl delete -f deployment/kylin/kylin-all-statefulset.yaml
-kubectl delete -f deployment/kylin/kylin-job-statefulset.yaml
\ No newline at end of file
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+kubectl delete -f deployment/memcached/
+kubectl delete -f deployment/kylin/
\ No newline at end of file
diff --git a/kubernetes/template/production/deploy-kylin.sh b/kubernetes/template/production/deploy-kylin.sh
index add393f..d6a0154 100644
--- a/kubernetes/template/production/deploy-kylin.sh
+++ b/kubernetes/template/production/deploy-kylin.sh
@@ -1,12 +1,26 @@
-echo """
-Hello, welcome to deploy Kylin on Kubernetes.
-BTW, this is a quick start template for demo usage.
-"""
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
 
 ## create namespace
 kubectl create namespace kylin-prod
 
-## Create configmap
+## Create configmap(please consider using secret in production env)
 kubectl create configmap -n kylin-prod hadoop-config \
     --from-file=../../config/production/hadoop/core-site.xml \
     --from-file=../../config/production/hadoop/hdfs-site.xml \
@@ -84,25 +98,9 @@ kubectl create configmap -n kylin-prod tomcat-config  \
     --dry-run -o yaml | kubectl apply -f -
 
 ### Prepare memcached service
-
-kubectl create -f deployment/memcached/memcached-service.yaml
-kubectl create -f deployment/memcached/memcached-statefulset.yaml
-
+kubectl apply -f deployment/memcached/
 
 ### Prepare kylin service
-
-
-## Create headless serivce
-kubectl create -f deployment/kylin/kylin-service.yaml
-
-## Create statefulset
-kubectl create -f deployment/kylin/kylin-all-statefulset.yaml
-kubectl create -f deployment/kylin/kylin-job-statefulset.yaml
-
-#kubectl create -f deployment/kylin/kylin-query-statefulset.yaml
-#kubectl create -f deployment/kylin/kylin-receiver-statefulset.yaml
-
-kubectl delete -f deployment/kylin/kylin-all-statefulset.yaml
-kubectl delete -f deployment/kylin/kylin-job-statefulset.yaml
+kubectl apply -f deployment/kylin/
 
 
diff --git a/kubernetes/template/production/deployment/kylin/kylin-all-statefulset.yaml b/kubernetes/template/production/deployment/kylin/kylin-all-statefulset.yaml
deleted file mode 100644
index 71bb07f..0000000
--- a/kubernetes/template/production/deployment/kylin/kylin-all-statefulset.yaml
+++ /dev/null
@@ -1,129 +0,0 @@
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: kylin-all
-  namespace: kylin-prod
-spec:
-  serviceName: kylin-svc
-  replicas: 1
-  selector:
-    matchExpressions:
-    - key: app
-      operator: In
-      values:
-      - kylin
-    - key: query
-      operator: In
-      values:
-      - "true"
-    - key: job
-      operator: In
-      values:
-      - "true"
-  template:
-    metadata:
-      labels:
-        app: kylin
-        query: "true"
-        job: "true"
-    spec:
-      hostAliases:
-        - ip: "10.1.3.90"
-          hostnames:
-            - "cdh-master"
-        - ip: "10.1.3.93"
-          hostnames:
-            - "cdh-worker-1"
-        - ip: "10.1.3.94"
-          hostnames:
-            - "cdh-worker-2"
-        - ip: "10.1.3.91"
-          hostnames:
-            - "cdh-client"
-      containers:
-      - name: kylin
-        image: kylin-client:3.0.1-cdh57
-        imagePullPolicy: IfNotPresent
-        command:
-        - sh
-        - -c
-        args:
-        - cp $KYLIN_HOME/tomcat-config/* $KYLIN_HOME/tomcat/conf;
-          cp $KYLIN_HOME/kylin-more-config $KYLIN_HOME/tomcat/webapps/kylin/WEB-INF/classes/;
-          $TOOL_HOME/bootstrap.sh server -d;
-        ports:
-        - containerPort: 7070
-        - containerPort: 7443
-        volumeMounts:
-        - name: kylin-all-config
-          mountPath: /home/apache_kylin/kylin/conf
-        - name: tomcat-config
-          mountPath: /home/apache_kylin/kylin/tomcat-config
-        - name: kylin-more-config
-          mountPath: /home/apache_kylin/kylin/kylin-more-config/
-        - name: hadoop-config
-          mountPath: /etc/hadoop/conf
-        - name: hive-config
-          mountPath: /etc/hive/conf
-        - name: hbase-config
-          mountPath: /etc/hbase/conf
-        - name: kylin-logs
-          mountPath: /home/apache_kylin/kylin/logs
-        - name: tomcat-logs
-          mountPath: /home/apache_kylin/kylin/tomcat/logs
-        resources:
-          requests:
-            memory: 2Gi
-            cpu: 1
-          limits:
-            memory: 2Gi
-            cpu: 1
-      - name: filebeat
-        image: docker.elastic.co/beats/filebeat:6.4.3
-        args:
-        - -c
-        - /usr/share/filebeat/config/filebeat.yml
-        - -e
-        volumeMounts:
-        - name: kylin-logs
-          mountPath: /home/apache_kylin/kylin/logs
-        - name: tomcat-logs
-          mountPath: /home/apache_kylin/kylin/tomcat/logs
-        - name: filebeat-config
-          mountPath: /usr/share/filebeat/config
-          readOnly: true
-        resources:
-          requests:
-            memory: 2Gi
-            cpu: 1
-          limits:
-            memory: 2Gi
-            cpu: 1
-      volumes:
-      - name: kylin-logs
-        emptyDir:
-          sizeLimit: 3Gi
-      - name: tomcat-logs
-        emptyDir:
-          sizeLimit: 2Gi
-      - configMap:
-          name: hadoop-config
-        name: hadoop-config
-      - configMap:
-          name: hive-config
-        name: hive-config
-      - configMap:
-          name: hbase-config
-        name: hbase-config
-      - configMap:
-          name: kylin-all-config
-        name: kylin-all-config
-      - configMap:
-          name: tomcat-config
-        name: tomcat-config
-      - configMap:
-          name: filebeat-config
-        name: filebeat-config
-      - configMap:
-          name: kylin-more-config
-        name: kylin-more-config
\ No newline at end of file
diff --git a/kubernetes/template/production/deployment/kylin/kylin-job-statefulset.yaml b/kubernetes/template/production/deployment/kylin/kylin-job-statefulset.yaml
index d04cc1b..494742a 100644
--- a/kubernetes/template/production/deployment/kylin/kylin-job-statefulset.yaml
+++ b/kubernetes/template/production/deployment/kylin/kylin-job-statefulset.yaml
@@ -1,3 +1,20 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
 apiVersion: apps/v1
 kind: StatefulSet
 metadata:
@@ -7,42 +24,18 @@ spec:
   serviceName: kylin-svc
   replicas: 2
   selector:
-    matchExpressions:
-    - key: app
-      operator: In
-      values:
-      - kylin
-    - key: query
-      operator: In
-      values:
-      - "false"
-    - key: job
-      operator: In
-      values:
-      - "true"
+    matchLabels:
+      app: kylin
+      role: job
   template:
     metadata:
       labels:
         app: kylin
-        query: "false"
-        job: "true"
+        role: job
     spec:
-      hostAliases:
-        - ip: "10.1.3.90"
-          hostnames:
-            - "cdh-master"
-        - ip: "10.1.3.92"
-          hostnames:
-            - "cdh-worker-1"
-        - ip: "10.1.3.93"
-          hostnames:
-            - "cdh-worker-2"
-        - ip: "10.1.3.91"
-          hostnames:
-            - "cdh-client"
       containers:
       - name: kylin
-        image: kylin-client:3.0.1-cdh57
+        image: kylin-client:{VERSION}
         imagePullPolicy: IfNotPresent
         command:
         - sh
@@ -73,11 +66,11 @@ spec:
           mountPath: /home/apache_kylin/kylin/tomcat/logs
         resources:
           requests:
-            memory: 2Gi
-            cpu: 800m
+            memory: 16Gi
+            cpu: 8
           limits:
-            memory: 2Gi
-            cpu: 800m
+            memory: 16Gi
+            cpu: 8
       - name: filebeat
         image: docker.elastic.co/beats/filebeat:6.4.3
         args:
@@ -94,18 +87,18 @@ spec:
           readOnly: true
         resources:
           requests:
-            memory: 1Gi
-            cpu: 400m
+            memory: 3Gi
+            cpu: 2
           limits:
-            memory: 1Gi
-            cpu: 400m
+            memory: 3Gi
+            cpu: 2
       volumes:
       - name: kylin-logs
         emptyDir:
-          sizeLimit: 3Gi
+          sizeLimit: 20Gi
       - name: tomcat-logs
         emptyDir:
-          sizeLimit: 2Gi
+          sizeLimit: 10Gi
       - configMap:
           name: hadoop-config
         name: hadoop-config
@@ -126,4 +119,6 @@ spec:
         name: filebeat-config
       - configMap:
           name: kylin-more-config
-        name: kylin-more-config
\ No newline at end of file
+        name: kylin-more-config
+  updateStrategy:
+    type: RollingUpdate
\ No newline at end of file
diff --git a/kubernetes/template/production/deployment/kylin/kylin-query-statefulset.yaml b/kubernetes/template/production/deployment/kylin/kylin-query-statefulset.yaml
index 508a54b..2af2797 100644
--- a/kubernetes/template/production/deployment/kylin/kylin-query-statefulset.yaml
+++ b/kubernetes/template/production/deployment/kylin/kylin-query-statefulset.yaml
@@ -1,3 +1,20 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
 apiVersion: apps/v1
 kind: StatefulSet
 metadata:
@@ -5,38 +22,27 @@ metadata:
   namespace: kylin-prod
 spec:
   serviceName: kylin-svc
-  replicas: 1
+  replicas: 2
   selector:
-    matchExpressions:
-    - key: app
-      operator: In
-      values:
-      - kylin
-    - key: query
-      operator: In
-      values:
-      - "true"
-    - key: job
-      operator: In
-      values:
-      - "false"
+    matchLabels:
+      app: kylin
+      role: query
   template:
     metadata:
       labels:
         app: kylin
-        query: "true"
-        job: "false"
+        role: query
     spec:
       containers:
       - name: kylin
-        image: kylin-cdh5:latest
+        image: kylin-client:{VERSION}
         imagePullPolicy: IfNotPresent
         command:
         - sh
         - -c
         args:
         - cp $KYLIN_HOME/tomcat-config/* $KYLIN_HOME/tomcat/conf;
-          cp $KYLIN_HOME/kylin-more-config $KYLIN_HOME/tomcat/webapps/kylin/WEB-INF/classes/
+          cp $KYLIN_HOME/kylin-more-config $KYLIN_HOME/tomcat/webapps/kylin/WEB-INF/classes/;
           $TOOL_HOME/bootstrap.sh server -d;
         ports:
         - containerPort: 7070
@@ -54,11 +60,11 @@ spec:
           mountPath: /home/apache_kylin/kylin/tomcat/logs
         resources:
           requests:
-            memory: 3Gi
-            cpu: 1
+            memory: 16Gi
+            cpu: 8
           limits:
-            memory: 4Gi
-            cpu: 2
+            memory: 16Gi
+            cpu: 8
       - name: filebeat
         image: docker.elastic.co/beats/filebeat:6.4.3
         args:
@@ -75,18 +81,18 @@ spec:
           readOnly: true
         resources:
           requests:
-            memory: 2Gi
-            cpu: 1
+            memory: 3Gi
+            cpu: 2
           limits:
-            memory: 2Gi
-            cpu: 1
+            memory: 3Gi
+            cpu: 2
       volumes:
       - name: kylin-logs
         emptyDir:
-          sizeLimit: 3Gi
+          sizeLimit: 20Gi
       - name: tomcat-logs
         emptyDir:
-          sizeLimit: 2Gi
+          sizeLimit: 10Gi
       - configMap:
           name: hadoop-config
         name: hadoop-config
@@ -107,4 +113,6 @@ spec:
         name: filebeat-config
       - configMap:
           name: kylin-more-config
-        name: kylin-more-config
\ No newline at end of file
+        name: kylin-more-config
+  updateStrategy:
+    type: RollingUpdate
\ No newline at end of file
diff --git a/kubernetes/template/production/deployment/kylin/kylin-service.yaml b/kubernetes/template/production/deployment/kylin/kylin-service.yaml
index 5b76f01..df90b95 100644
--- a/kubernetes/template/production/deployment/kylin/kylin-service.yaml
+++ b/kubernetes/template/production/deployment/kylin/kylin-service.yaml
@@ -1,3 +1,20 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
 apiVersion: v1
 kind: Service
 metadata:
@@ -13,4 +30,5 @@ spec:
     targetPort: 7443
   selector:
     app: kylin
+    role: query
   type: LoadBalancer
\ No newline at end of file
diff --git a/kubernetes/template/production/deployment/memcached/memcached-service.yaml b/kubernetes/template/production/deployment/memcached/memcached-service.yaml
index d1376f9..ff9255e 100644
--- a/kubernetes/template/production/deployment/memcached/memcached-service.yaml
+++ b/kubernetes/template/production/deployment/memcached/memcached-service.yaml
@@ -1,3 +1,20 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
 apiVersion: v1
 kind: Service
 metadata:
diff --git a/kubernetes/template/production/deployment/memcached/memcached-statefulset.yaml b/kubernetes/template/production/deployment/memcached/memcached-statefulset.yaml
index db4d7e9..ebdace6 100644
--- a/kubernetes/template/production/deployment/memcached/memcached-statefulset.yaml
+++ b/kubernetes/template/production/deployment/memcached/memcached-statefulset.yaml
@@ -1,3 +1,20 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
 apiVersion: apps/v1
 kind: StatefulSet
 metadata:
@@ -26,11 +43,11 @@ spec:
         - "-m 20480"
         resources:
           limits:
-            cpu: 500m
-            memory: 2Gi
+            cpu: 6
+            memory: 20Gi
           requests:
-            cpu: 500m
-            memory: 2Gi
+            cpu: 6
+            memory: 20Gi
         livenessProbe:
           tcpSocket:
             port: 11211
diff --git a/kubernetes/template/production/deployment/kylin/kylin-receiver-statefulset.yaml b/kubernetes/template/production/deployment/streaming/kylin-receiver-statefulset.yaml
similarity index 53%
rename from kubernetes/template/production/deployment/kylin/kylin-receiver-statefulset.yaml
rename to kubernetes/template/production/deployment/streaming/kylin-receiver-statefulset.yaml
index 371e0b9..e611845 100644
--- a/kubernetes/template/production/deployment/kylin/kylin-receiver-statefulset.yaml
+++ b/kubernetes/template/production/deployment/streaming/kylin-receiver-statefulset.yaml
@@ -1,3 +1,20 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
 apiVersion: apps/v1
 kind: StatefulSet
 metadata:
@@ -7,21 +24,20 @@ spec:
   serviceName: receiver-svc
   replicas: 2
   selector:
-    matchExpressions:
-    - key: app
-      operator: In
-      values:
-        - kylin-receiver
+    matchLabels:
+      app: kylin
+      role: receiver
   template:
     metadata:
       labels:
-        app: kylin-receiver
+        app: kylin
+        role: receiver
     spec:
       securityContext:
         fsGroup: 996
       containers:
       - name: kylin
-        image: kylin-cdh5:latest
+        image: kylin-client:{VERSION}
         imagePullPolicy: IfNotPresent
         command:
         - sh
@@ -37,11 +53,11 @@ spec:
           mountPath: /home/apache_kylin/kylin/stream_index
         resources:
           requests:
-            memory: 2Gi
-            cpu: 1
+            memory: 24Gi
+            cpu: 12
           limits:
-            memory: 2Gi
-            cpu: 1
+            memory: 24Gi
+            cpu: 12
       volumes:
         - configMap:
             name: hadoop-config
@@ -63,5 +79,5 @@ spec:
         - ReadWriteOnce
       resources:
         requests:
-          storage: 10Gi
+          storage: 20Gi
       storageClassName: local-dynamic
\ No newline at end of file
diff --git a/kubernetes/template/production/example/README.md b/kubernetes/template/production/example/README.md
new file mode 100644
index 0000000..2bd1d45
--- /dev/null
+++ b/kubernetes/template/production/example/README.md
@@ -0,0 +1,173 @@
+## Demo in CDH5.7
+
+> This doc demonstrates how to apply the production deployment template, so all **required** configuration files are provided in the `config` directory. 
+> For CDH5.x users, you can simply replace the hadoop configuration files with yours and execute the following 
+> commands to deploy a kylin cluster very quickly.
+
+### Pre-requirements
+
+- A healthy CDH 5.7 hadoop cluster.
+- A healthy on-premises, latest-version K8s cluster, with at least 8 available cores and 20 GB memory.
+- A healthy Elasticsearch cluster.
+
+### Deploy steps
+
+- Create statefulset and service for memcached
+```
+root@open-source:/home/ubuntu/example/deployment# kubectl apply -f memcached/
+service/cache-svc created
+statefulset.apps/kylin-memcached created
+```
+
+
+- Check hostname of cache service
+```shell
+root@open-source:/home/ubuntu# kubectl run -it --image=busybox:1.28.4 --rm --restart=Never sh -n test-dns
+If you don't see a command prompt, try pressing enter.
+
+/ # nslookup cache-svc.kylin-example.svc.cluster.local
+Server:    10.96.0.10
+Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
+
+Name:      cache-svc.kylin-example.svc.cluster.local
+Address 1: 192.168.11.44 kylin-memcached-0.cache-svc.kylin-example.svc.cluster.local
+/ #
+```
+
+- Modify memcached configuration
+```shell
+# modify memcached hostname(session sharing)
+# memcachedNodes="n1:kylin-memcached-0.cache-svc.kylin-example.svc.cluster.local:11211"
+vim ../config/tomcat/context.xml
+
+
+# modify memcached hostname(query cache)
+# kylin.cache.memcached.hosts=kylin-memcached-0.cache-svc.kylin-example.svc.cluster.local:11211
+vim ../config/kylin-job/kylin.properties
+vim ../config/kylin-query/kylin.properties
+```
+
+- Create configMap
+```shell
+root@open-source:/home/ubuntu/example/deployment# kubectl create configmap -n kylin-example hadoop-config \
+>     --from-file=../config/hadoop/core-site.xml \
+>     --from-file=../config/hadoop/hdfs-site.xml \
+>     --from-file=../config/hadoop/yarn-site.xml \
+>     --from-file=../config/hadoop/mapred-site.xml \
+>     --dry-run -o yaml | kubectl apply -f -
+W0426 07:45:45.742257   19657 helpers.go:535] --dry-run is deprecated and can be replaced with --dry-run=client.
+configmap/hadoop-config created
+
+root@open-source:/home/ubuntu/example/deployment# kubectl create configmap -n kylin-example hive-config \
+>     --from-file=../config/hadoop/hive-site.xml \
+>     --dry-run -o yaml | kubectl apply -f -
+W0426 07:45:54.889003   19864 helpers.go:535] --dry-run is deprecated and can be replaced with --dry-run=client.
+configmap/hive-config created
+
+root@open-source:/home/ubuntu/example/deployment# kubectl create configmap -n kylin-example hbase-config \
+>     --from-file=../config/hadoop/hbase-site.xml \
+>     --dry-run -o yaml | kubectl apply -f -
+W0426 07:46:04.623956   20071 helpers.go:535] --dry-run is deprecated and can be replaced with --dry-run=client.
+configmap/hbase-config created
+
+root@open-source:/home/ubuntu/example/deployment# kubectl create configmap -n kylin-example kylin-more-config \
+>     --from-file=../config/kylin-more/applicationContext.xml \
+>     --from-file=../config/kylin-more/ehcache.xml \
+>     --from-file=../config/kylin-more/ehcache-test.xml \
+>     --from-file=../config/kylin-more/kylinMetrics.xml \
+>     --from-file=../config/kylin-more/kylinSecurity.xml \
+>     --dry-run -o yaml | kubectl apply -f -
+W0426 08:02:13.170807    5454 helpers.go:535] --dry-run is deprecated and can be replaced with --dry-run=client.
+
+root@open-source:/home/ubuntu/example/deployment# kubectl create configmap -n kylin-example kylin-job-config  \
+>     --from-file=../config/kylin-job/kylin-kafka-consumer.xml \
+>     --from-file=../config/kylin-job/kylin_hive_conf.xml \
+>     --from-file=../config/kylin-job/kylin_job_conf.xml \
+>     --from-file=../config/kylin-job/kylin_job_conf_inmem.xml \
+>     --from-file=../config/kylin-job/kylin-server-log4j.properties \
+>     --from-file=../config/kylin-job/kylin-spark-log4j.properties \
+>     --from-file=../config/kylin-job/kylin-tools-log4j.properties \
+>     --from-file=../config/kylin-job/kylin.properties \
+>     --from-file=../config/kylin-job/setenv.sh \
+>     --from-file=../config/kylin-job/setenv-tool.sh \
+>     --dry-run -o yaml | kubectl apply -f -
+W0426 08:02:35.168886    5875 helpers.go:535] --dry-run is deprecated and can be replaced with --dry-run=client.
+configmap/kylin-job-config created
+
+root@open-source:/home/ubuntu/example/deployment# kubectl create configmap -n kylin-example filebeat-config  \
+>     --from-file=../config/filebeat/filebeat.yml \
+>     --dry-run -o yaml | kubectl apply -f -
+W0426 08:07:08.983369   10722 helpers.go:535] --dry-run is deprecated and can be replaced with --dry-run=client.
+configmap/filebeat-config created
+
+root@open-source:/home/ubuntu/example/deployment# kubectl create configmap -n kylin-example tomcat-config  \
+>     --from-file=../config/tomcat/server.xml \
+>     --from-file=../config/tomcat/context.xml \
+>     --dry-run -o yaml | kubectl apply -f -
+W0426 08:07:48.439995   11459 helpers.go:535] --dry-run is deprecated and can be replaced with --dry-run=client.
+configmap/tomcat-config created
+```
+
+- Deploy Kylin's Job server(two instances)
+```shell
+root@open-source:/home/ubuntu/example/deployment# kubectl apply -f kylin-job/
+service/kylin-job-svc created
+statefulset.apps/kylin-job created
+```
+
+
+- Deploy Kylin's Query server(two instances)
+```shell
+root@open-source:/home/ubuntu/example/deployment# kubectl apply -f kylin-query/
+service/kylin-query-svc created
+statefulset.apps/kylin-query created
+```
+
+- Visit Web UI
+1. `http://${POD_HOSTNAME}:30010/kylin` for JobServer 
+2. `http://${POD_HOSTNAME}:30012/kylin` for QueryServer
+
+### Cleanup
+```shell
+root@open-source:/home/ubuntu/example/deployment# kubectl delete -f memcached/
+root@open-source:/home/ubuntu/example/deployment# kubectl delete -f kylin-query/
+root@open-source:/home/ubuntu/example/deployment# kubectl delete -f kylin-job/
+```
+
+### Troubleshooting
+- Check logs of specific pod
+```shell
+##  Output of : sh kylin.sh start
+root@open-source:/home/ubuntu/example/deployment# kubectl logs kylin-job-0 kylin -n kylin-example
+
+root@open-source:/home/ubuntu/example/deployment# kubectl logs -f kylin-job-0 kylin -n kylin-example
+```
+
+- Attach to a specific pod, say "kylin-job-0"
+```shell
+root@open-source:/home/ubuntu/example/deployment# kubectl exec -it  kylin-job-0  -n kylin-example -- bash
+```
+
+- Check failure reasons of specific pod
+```shell
+root@open-source:/home/ubuntu/example/deployment# kubectl get pod kylin-job-0  -n kylin-example -o yaml
+
+root@open-source:/home/ubuntu/example/deployment# kubectl describe pod kylin-job-0  -n kylin-example
+```
+
+- Check if all status is Running
+```shell
+root@open-source:/home/ubuntu/example/deployment# kubectl get statefulset -n kylin-example
+NAME              READY   AGE
+kylin-memcached   1/1     45s
+
+root@open-source:/home/ubuntu/example/deployment# kubectl get service -n kylin-example
+NAME        TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)     AGE
+cache-svc   ClusterIP   None         <none>        11211/TCP   54s
+
+root@open-source:/home/ubuntu/example/deployment# kubectl get pod -n kylin-example
+NAME                READY   STATUS    RESTARTS   AGE
+kylin-memcached-0   1/1     Running   0          61s
+```
+
+- If you don't have an Elasticsearch cluster or are not interested in log collection, please remove the `filebeat` container in both `kylin-query-stateful.yaml` and `kylin-job-stateful.yaml`.
\ No newline at end of file
diff --git a/kubernetes/example/config/filebeat/filebeat.yml b/kubernetes/template/production/example/config/filebeat/filebeat.yml
similarity index 81%
rename from kubernetes/example/config/filebeat/filebeat.yml
rename to kubernetes/template/production/example/config/filebeat/filebeat.yml
index 6916da1..a5ac4f7 100644
--- a/kubernetes/example/config/filebeat/filebeat.yml
+++ b/kubernetes/template/production/example/config/filebeat/filebeat.yml
@@ -1,3 +1,20 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
 ###################### Filebeat Configuration Example #########################
 
 # This file is an example configuration file highlighting only the most common
diff --git a/kubernetes/example/config/hadoop/core-site.xml b/kubernetes/template/production/example/config/hadoop/core-site.xml
similarity index 81%
rename from kubernetes/example/config/hadoop/core-site.xml
rename to kubernetes/template/production/example/config/hadoop/core-site.xml
index d5622cb..9ad88f8 100644
--- a/kubernetes/example/config/hadoop/core-site.xml
+++ b/kubernetes/template/production/example/config/hadoop/core-site.xml
@@ -1,6 +1,20 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
 
-<!--Autogenerated by Cloudera Manager-->
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 <configuration>
   <property>
     <name>fs.defaultFS</name>
diff --git a/kubernetes/example/config/hadoop/hbase-site.xml b/kubernetes/template/production/example/config/hadoop/hbase-site.xml
similarity index 77%
rename from kubernetes/example/config/hadoop/hbase-site.xml
rename to kubernetes/template/production/example/config/hadoop/hbase-site.xml
index e9b7069..17b5a55 100644
--- a/kubernetes/example/config/hadoop/hbase-site.xml
+++ b/kubernetes/template/production/example/config/hadoop/hbase-site.xml
@@ -1,6 +1,20 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
 
-<!--Autogenerated by Cloudera Manager-->
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 <configuration>
   <property>
     <name>hbase.rootdir</name>
diff --git a/kubernetes/example/config/hadoop/hdfs-site.xml b/kubernetes/template/production/example/config/hadoop/hdfs-site.xml
similarity index 67%
rename from kubernetes/example/config/hadoop/hdfs-site.xml
rename to kubernetes/template/production/example/config/hadoop/hdfs-site.xml
index f8e8fa0..6533089 100644
--- a/kubernetes/example/config/hadoop/hdfs-site.xml
+++ b/kubernetes/template/production/example/config/hadoop/hdfs-site.xml
@@ -1,6 +1,20 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
 
-<!--Autogenerated by Cloudera Manager-->
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 <configuration>
   <property>
     <name>dfs.namenode.name.dir</name>
diff --git a/kubernetes/example/config/hadoop/hive-site.xml b/kubernetes/template/production/example/config/hadoop/hive-site.xml
similarity index 87%
rename from kubernetes/example/config/hadoop/hive-site.xml
rename to kubernetes/template/production/example/config/hadoop/hive-site.xml
index 598c690..a4c30e5 100644
--- a/kubernetes/example/config/hadoop/hive-site.xml
+++ b/kubernetes/template/production/example/config/hadoop/hive-site.xml
@@ -1,6 +1,20 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
 
-<!--Autogenerated by Cloudera Manager-->
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 <configuration>
   <property>
     <name>hive.metastore.uris</name>
diff --git a/kubernetes/example/config/hadoop/mapred-site.xml b/kubernetes/template/production/example/config/hadoop/mapred-site.xml
similarity index 85%
rename from kubernetes/example/config/hadoop/mapred-site.xml
rename to kubernetes/template/production/example/config/hadoop/mapred-site.xml
index 842b7fb..220ff09 100644
--- a/kubernetes/example/config/hadoop/mapred-site.xml
+++ b/kubernetes/template/production/example/config/hadoop/mapred-site.xml
@@ -1,6 +1,20 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
 
-<!--Autogenerated by Cloudera Manager-->
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 <configuration>
   <property>
     <name>mapreduce.job.split.metainfo.maxsize</name>
diff --git a/kubernetes/example/config/hadoop/yarn-site.xml b/kubernetes/template/production/example/config/hadoop/yarn-site.xml
similarity index 81%
rename from kubernetes/example/config/hadoop/yarn-site.xml
rename to kubernetes/template/production/example/config/hadoop/yarn-site.xml
index 569cc20..ee0325d 100644
--- a/kubernetes/example/config/hadoop/yarn-site.xml
+++ b/kubernetes/template/production/example/config/hadoop/yarn-site.xml
@@ -1,6 +1,20 @@
 <?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
 
-<!--Autogenerated by Cloudera Manager-->
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
 <configuration>
   <property>
     <name>yarn.acl.enable</name>
diff --git a/kubernetes/example/config/kylin-query/kylin-kafka-consumer.xml b/kubernetes/template/production/example/config/kylin-job/kylin-kafka-consumer.xml
similarity index 100%
rename from kubernetes/example/config/kylin-query/kylin-kafka-consumer.xml
rename to kubernetes/template/production/example/config/kylin-job/kylin-kafka-consumer.xml
diff --git a/kubernetes/example/config/kylin-query/kylin-server-log4j.properties b/kubernetes/template/production/example/config/kylin-job/kylin-server-log4j.properties
similarity index 100%
rename from kubernetes/example/config/kylin-query/kylin-server-log4j.properties
rename to kubernetes/template/production/example/config/kylin-job/kylin-server-log4j.properties
diff --git a/kubernetes/example/config/kylin-query/kylin-spark-log4j.properties b/kubernetes/template/production/example/config/kylin-job/kylin-spark-log4j.properties
similarity index 100%
rename from kubernetes/example/config/kylin-query/kylin-spark-log4j.properties
rename to kubernetes/template/production/example/config/kylin-job/kylin-spark-log4j.properties
diff --git a/kubernetes/example/config/kylin-query/kylin-tools-log4j.properties b/kubernetes/template/production/example/config/kylin-job/kylin-tools-log4j.properties
similarity index 100%
rename from kubernetes/example/config/kylin-query/kylin-tools-log4j.properties
rename to kubernetes/template/production/example/config/kylin-job/kylin-tools-log4j.properties
diff --git a/kubernetes/example/config/kylin-job/kylin.properties b/kubernetes/template/production/example/config/kylin-job/kylin.properties
similarity index 100%
rename from kubernetes/example/config/kylin-job/kylin.properties
rename to kubernetes/template/production/example/config/kylin-job/kylin.properties
diff --git a/kubernetes/example/config/kylin-query/kylin_hive_conf.xml b/kubernetes/template/production/example/config/kylin-job/kylin_hive_conf.xml
similarity index 100%
rename from kubernetes/example/config/kylin-query/kylin_hive_conf.xml
rename to kubernetes/template/production/example/config/kylin-job/kylin_hive_conf.xml
diff --git a/kubernetes/example/config/kylin-query/kylin_job_conf.xml b/kubernetes/template/production/example/config/kylin-job/kylin_job_conf.xml
similarity index 100%
rename from kubernetes/example/config/kylin-query/kylin_job_conf.xml
rename to kubernetes/template/production/example/config/kylin-job/kylin_job_conf.xml
diff --git a/kubernetes/example/config/kylin-query/kylin_job_conf_cube_merge.xml b/kubernetes/template/production/example/config/kylin-job/kylin_job_conf_cube_merge.xml
similarity index 100%
rename from kubernetes/example/config/kylin-query/kylin_job_conf_cube_merge.xml
rename to kubernetes/template/production/example/config/kylin-job/kylin_job_conf_cube_merge.xml
diff --git a/kubernetes/example/config/kylin-query/kylin_job_conf_inmem.xml b/kubernetes/template/production/example/config/kylin-job/kylin_job_conf_inmem.xml
similarity index 100%
rename from kubernetes/example/config/kylin-query/kylin_job_conf_inmem.xml
rename to kubernetes/template/production/example/config/kylin-job/kylin_job_conf_inmem.xml
diff --git a/kubernetes/example/config/kylin-job/setenv-tool.sh b/kubernetes/template/production/example/config/kylin-job/setenv-tool.sh
similarity index 100%
rename from kubernetes/example/config/kylin-job/setenv-tool.sh
rename to kubernetes/template/production/example/config/kylin-job/setenv-tool.sh
diff --git a/kubernetes/example/config/kylin-query/setenv.sh b/kubernetes/template/production/example/config/kylin-job/setenv.sh
similarity index 100%
rename from kubernetes/example/config/kylin-query/setenv.sh
rename to kubernetes/template/production/example/config/kylin-job/setenv.sh
diff --git a/kubernetes/example/config/kylin-more/applicationContext.xml b/kubernetes/template/production/example/config/kylin-more/applicationContext.xml
similarity index 100%
rename from kubernetes/example/config/kylin-more/applicationContext.xml
rename to kubernetes/template/production/example/config/kylin-more/applicationContext.xml
diff --git a/kubernetes/example/config/kylin-more/ehcache-test.xml b/kubernetes/template/production/example/config/kylin-more/ehcache-test.xml
similarity index 100%
rename from kubernetes/example/config/kylin-more/ehcache-test.xml
rename to kubernetes/template/production/example/config/kylin-more/ehcache-test.xml
diff --git a/kubernetes/example/config/kylin-more/ehcache.xml b/kubernetes/template/production/example/config/kylin-more/ehcache.xml
similarity index 100%
rename from kubernetes/example/config/kylin-more/ehcache.xml
rename to kubernetes/template/production/example/config/kylin-more/ehcache.xml
diff --git a/kubernetes/example/config/kylin-more/kylinMetrics.xml b/kubernetes/template/production/example/config/kylin-more/kylinMetrics.xml
similarity index 100%
rename from kubernetes/example/config/kylin-more/kylinMetrics.xml
rename to kubernetes/template/production/example/config/kylin-more/kylinMetrics.xml
diff --git a/kubernetes/example/config/kylin-more/kylinSecurity.xml b/kubernetes/template/production/example/config/kylin-more/kylinSecurity.xml
similarity index 100%
rename from kubernetes/example/config/kylin-more/kylinSecurity.xml
rename to kubernetes/template/production/example/config/kylin-more/kylinSecurity.xml
diff --git a/kubernetes/example/config/kylin-job/kylin-kafka-consumer.xml b/kubernetes/template/production/example/config/kylin-query/kylin-kafka-consumer.xml
similarity index 100%
rename from kubernetes/example/config/kylin-job/kylin-kafka-consumer.xml
rename to kubernetes/template/production/example/config/kylin-query/kylin-kafka-consumer.xml
diff --git a/kubernetes/example/config/kylin-job/kylin-server-log4j.properties b/kubernetes/template/production/example/config/kylin-query/kylin-server-log4j.properties
similarity index 100%
copy from kubernetes/example/config/kylin-job/kylin-server-log4j.properties
copy to kubernetes/template/production/example/config/kylin-query/kylin-server-log4j.properties
diff --git a/kubernetes/example/config/kylin-job/kylin-spark-log4j.properties b/kubernetes/template/production/example/config/kylin-query/kylin-spark-log4j.properties
similarity index 100%
rename from kubernetes/example/config/kylin-job/kylin-spark-log4j.properties
rename to kubernetes/template/production/example/config/kylin-query/kylin-spark-log4j.properties
diff --git a/kubernetes/example/config/kylin-job/kylin-tools-log4j.properties b/kubernetes/template/production/example/config/kylin-query/kylin-tools-log4j.properties
similarity index 100%
rename from kubernetes/example/config/kylin-job/kylin-tools-log4j.properties
rename to kubernetes/template/production/example/config/kylin-query/kylin-tools-log4j.properties
diff --git a/kubernetes/example/config/kylin-query/kylin.properties b/kubernetes/template/production/example/config/kylin-query/kylin.properties
similarity index 99%
rename from kubernetes/example/config/kylin-query/kylin.properties
rename to kubernetes/template/production/example/config/kylin-query/kylin.properties
index d36fde1..448821e 100644
--- a/kubernetes/example/config/kylin-query/kylin.properties
+++ b/kubernetes/template/production/example/config/kylin-query/kylin.properties
@@ -39,7 +39,7 @@ kylin.metadata.url=kylin_metadata_k8s_poc@hbase
 #kylin.metadata.sync-retries=3
 #
 ## Working folder in HDFS, better be qualified absolute path, make sure user has the right permission to this directory
-kylin.env.hdfs-working-dir=/kylin/apache_kylin
+kylin.env.hdfs-working-dir=/kylin
 #
 ## DEV|QA|PROD. DEV will turn on some dev features, QA and PROD has no difference in terms of functions.
 #kylin.env=QA
diff --git a/kubernetes/example/config/kylin-job/kylin_hive_conf.xml b/kubernetes/template/production/example/config/kylin-query/kylin_hive_conf.xml
similarity index 100%
rename from kubernetes/example/config/kylin-job/kylin_hive_conf.xml
rename to kubernetes/template/production/example/config/kylin-query/kylin_hive_conf.xml
diff --git a/kubernetes/example/config/kylin-job/kylin_job_conf.xml b/kubernetes/template/production/example/config/kylin-query/kylin_job_conf.xml
similarity index 100%
rename from kubernetes/example/config/kylin-job/kylin_job_conf.xml
rename to kubernetes/template/production/example/config/kylin-query/kylin_job_conf.xml
diff --git a/kubernetes/example/config/kylin-job/kylin_job_conf_cube_merge.xml b/kubernetes/template/production/example/config/kylin-query/kylin_job_conf_cube_merge.xml
similarity index 100%
rename from kubernetes/example/config/kylin-job/kylin_job_conf_cube_merge.xml
rename to kubernetes/template/production/example/config/kylin-query/kylin_job_conf_cube_merge.xml
diff --git a/kubernetes/example/config/kylin-job/kylin_job_conf_inmem.xml b/kubernetes/template/production/example/config/kylin-query/kylin_job_conf_inmem.xml
similarity index 100%
rename from kubernetes/example/config/kylin-job/kylin_job_conf_inmem.xml
rename to kubernetes/template/production/example/config/kylin-query/kylin_job_conf_inmem.xml
diff --git a/kubernetes/example/config/kylin-query/setenv-tool.sh b/kubernetes/template/production/example/config/kylin-query/setenv-tool.sh
similarity index 100%
rename from kubernetes/example/config/kylin-query/setenv-tool.sh
rename to kubernetes/template/production/example/config/kylin-query/setenv-tool.sh
diff --git a/kubernetes/example/config/kylin-job/setenv.sh b/kubernetes/template/production/example/config/kylin-query/setenv.sh
similarity index 100%
rename from kubernetes/example/config/kylin-job/setenv.sh
rename to kubernetes/template/production/example/config/kylin-query/setenv.sh
diff --git a/kubernetes/template/production/example/config/tomcat/context.xml b/kubernetes/template/production/example/config/tomcat/context.xml
new file mode 100644
index 0000000..5572247
--- /dev/null
+++ b/kubernetes/template/production/example/config/tomcat/context.xml
@@ -0,0 +1,48 @@
+<?xml version='1.0' encoding='utf-8'?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~  
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~  
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+<!-- The contents of this file will be loaded for each web application -->
+<Context allowLinking="true">
+
+    <!-- Default set of monitored resources -->
+    <WatchedResource>WEB-INF/web.xml</WatchedResource>
+
+    <!-- Uncomment this to disable session persistence across Tomcat restarts -->
+    <!--
+    <Manager pathname="" />
+    -->
+
+    <!-- Uncomment this to enable Comet connection tracking (provides events
+         on session expiration as well as webapp lifecycle) -->
+    <!--
+    <Valve className="org.apache.catalina.valves.CometConnectionManagerValve" />
+    -->
+
+    <Loader loaderClass="org.apache.kylin.ext.CustomizedWebappClassloader"/>
+
+
+    <!-- -->
+    <Manager className="de.javakaffee.web.msm.MemcachedBackupSessionManager"
+             memcachedNodes="n1:{kylin-memcached-server}:11211"
+             failoverNodes="n1"
+             storageKeyPrefix="context"
+             requestUriIgnorePattern=".*\.(ico|png|gif|jpg|css|js)$"
+    />
+
+
+</Context>
diff --git a/kubernetes/template/production/example/config/tomcat/server.xml b/kubernetes/template/production/example/config/tomcat/server.xml
new file mode 100644
index 0000000..c626bc4
--- /dev/null
+++ b/kubernetes/template/production/example/config/tomcat/server.xml
@@ -0,0 +1,142 @@
+<?xml version='1.0' encoding='utf-8'?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<!-- Note:  A "Server" is not itself a "Container", so you may not
+     define subcomponents such as "Valves" at this level.
+     Documentation at /docs/config/server.html
+ -->
+<Server port="9005" shutdown="SHUTDOWN">
+    <!-- Security listener. Documentation at /docs/config/listeners.html
+    <Listener className="org.apache.catalina.security.SecurityListener" />
+    -->
+    <!--APR library loader. Documentation at /docs/apr.html -->
+    <Listener className="org.apache.catalina.core.AprLifecycleListener" SSLEngine="on" />
+    <!--Initialize Jasper prior to webapps are loaded. Documentation at /docs/jasper-howto.html -->
+    <Listener className="org.apache.catalina.core.JasperListener" />
+    <!-- Prevent memory leaks due to use of particular java/javax APIs-->
+    <Listener className="org.apache.catalina.core.JreMemoryLeakPreventionListener" />
+    <Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener" />
+    <Listener className="org.apache.catalina.core.ThreadLocalLeakPreventionListener" />
+
+    <!-- Global JNDI resources
+         Documentation at /docs/jndi-resources-howto.html
+    -->
+    <GlobalNamingResources>
+        <!-- Editable user database that can also be used by
+             UserDatabaseRealm to authenticate users
+        -->
+        <Resource name="UserDatabase" auth="Container"
+                  type="org.apache.catalina.UserDatabase"
+                  description="User database that can be updated and saved"
+                  factory="org.apache.catalina.users.MemoryUserDatabaseFactory"
+                  pathname="conf/tomcat-users.xml" />
+    </GlobalNamingResources>
+
+    <!-- A "Service" is a collection of one or more "Connectors" that share
+         a single "Container" Note:  A "Service" is not itself a "Container",
+         so you may not define subcomponents such as "Valves" at this level.
+         Documentation at /docs/config/service.html
+     -->
+    <Service name="Catalina">
+
+        <!--The connectors can use a shared executor, you can define one or more named thread pools-->
+        <!--
+        <Executor name="tomcatThreadPool" namePrefix="catalina-exec-"
+            maxThreads="150" minSpareThreads="4"/>
+        -->
+
+
+        <!-- A "Connector" represents an endpoint by which requests are received
+             and responses are returned. Documentation at :
+             Java HTTP Connector: /docs/config/http.html (blocking & non-blocking)
+             Java AJP  Connector: /docs/config/ajp.html
+             APR (HTTP/AJP) Connector: /docs/apr.html
+             Define a non-SSL HTTP/1.1 Connector on port 8080
+        -->
+        <Connector port="7070" protocol="HTTP/1.1"
+                   connectionTimeout="20000"
+                   redirectPort="7443"
+                   compression="on"
+                   compressionMinSize="2048"
+                   noCompressionUserAgents="gozilla,traviata"
+                   compressableMimeType="text/html,text/xml,text/javascript,application/javascript,application/json,text/css,text/plain"
+                   URIEncoding="UTF-8"
+        />
+        <!-- A "Connector" using the shared thread pool-->
+        <!-- Define a SSL HTTP/1.1 Connector on port 8443
+             This connector uses the BIO implementation that requires the JSSE
+             style configuration. When using the APR/native implementation, the
+             OpenSSL style configuration is required as described in the APR/native
+             documentation -->
+        <Connector port="7443" protocol="org.apache.coyote.http11.Http11Protocol"
+                   maxThreads="150" SSLEnabled="true" scheme="https" secure="true"
+                   keystoreFile="conf/.keystore" keystorePass="changeit"
+                   clientAuth="false" sslProtocol="TLS" />
+
+        <!-- Define an AJP 1.3 Connector on port 8009 -->
+        <Connector port="9009" protocol="AJP/1.3" redirectPort="9443" />
+
+
+        <!-- An Engine represents the entry point (within Catalina) that processes
+             every request.  The Engine implementation for Tomcat stand alone
+             analyzes the HTTP headers included with the request, and passes them
+             on to the appropriate Host (virtual host).
+             Documentation at /docs/config/engine.html -->
+
+        <!-- You should set jvmRoute to support load-balancing via AJP ie :
+        <Engine name="Catalina" defaultHost="localhost" jvmRoute="jvm1">
+        -->
+        <Engine name="Catalina" defaultHost="localhost">
+
+            <!--For clustering, please take a look at documentation at:
+                /docs/cluster-howto.html  (simple how to)
+                /docs/config/cluster.html (reference documentation) -->
+            <!--
+            <Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"/>
+            -->
+
+            <!-- Use the LockOutRealm to prevent attempts to guess user passwords
+                 via a brute-force attack -->
+            <Realm className="org.apache.catalina.realm.LockOutRealm">
+                <!-- This Realm uses the UserDatabase configured in the global JNDI
+                     resources under the key "UserDatabase".  Any edits
+                     that are performed against this UserDatabase are immediately
+                     available for use by the Realm.  -->
+                <Realm className="org.apache.catalina.realm.UserDatabaseRealm"
+                       resourceName="UserDatabase"/>
+            </Realm>
+
+            <Host name="localhost"  appBase="webapps"
+                  unpackWARs="true" autoDeploy="true">
+
+                <!-- SingleSignOn valve, share authentication between web applications
+                     Documentation at: /docs/config/valve.html -->
+                <!--
+                <Valve className="org.apache.catalina.authenticator.SingleSignOn" />
+                -->
+
+                <!-- Access log processes all example.
+                     Documentation at: /docs/config/valve.html
+                     Note: The pattern used is equivalent to using pattern="common" -->
+                <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
+                       prefix="localhost_access_log." suffix=".txt"
+                       pattern="%h %l %u %t &quot;%r&quot; %s %b %D %{User-Agent}i" />
+
+            </Host>
+        </Engine>
+    </Service>
+</Server>
diff --git a/kubernetes/example/deployment/deploy-sample-cluster.sh b/kubernetes/template/production/example/deployment/deploy-sample-cluster.sh
similarity index 60%
rename from kubernetes/example/deployment/deploy-sample-cluster.sh
rename to kubernetes/template/production/example/deployment/deploy-sample-cluster.sh
index 3d67e9c..ef51852 100644
--- a/kubernetes/example/deployment/deploy-sample-cluster.sh
+++ b/kubernetes/template/production/example/deployment/deploy-sample-cluster.sh
@@ -1,16 +1,24 @@
-
-systemctl status docker-ce
-systemctl status kube-apiserver
-systemctl status kube-controller-manager
-systemctl status kube-scheduler
-systemctl status kubelet
-systemctl status kube-proxy
-
-
-## create namespace
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+## Step1. Create namespace
 kubectl create namespace kylin-example
 
-## Create configmap
+## Step2. Create configmap (please consider using secret in production env)
 kubectl create configmap -n kylin-example hadoop-config \
     --from-file=../config/hadoop/core-site.xml \
     --from-file=../config/hadoop/hdfs-site.xml \
@@ -34,19 +42,6 @@ kubectl create configmap -n kylin-example kylin-more-config \
     --from-file=../config/kylin-more/kylinSecurity.xml \
     --dry-run -o yaml | kubectl apply -f -
 
-kubectl create configmap -n kylin-example kylin-all-config  \
-    --from-file=../config/kylin-all/kylin-kafka-consumer.xml \
-    --from-file=../config/kylin-all/kylin_hive_conf.xml \
-    --from-file=../config/kylin-all/kylin_job_conf.xml \
-    --from-file=../config/kylin-all/kylin_job_conf_inmem.xml \
-    --from-file=../config/kylin-all/kylin-server-log4j.properties \
-    --from-file=../config/kylin-all/kylin-spark-log4j.properties \
-    --from-file=../config/kylin-all/kylin-tools-log4j.properties \
-    --from-file=../config/kylin-all/kylin.properties \
-    --from-file=../config/kylin-all/setenv.sh \
-    --from-file=../config/kylin-all/setenv-tool.sh \
-    --dry-run -o yaml | kubectl apply -f -
-
 kubectl create configmap -n kylin-example kylin-job-config  \
     --from-file=../config/kylin-job/kylin-kafka-consumer.xml \
     --from-file=../config/kylin-job/kylin_hive_conf.xml \
@@ -73,11 +68,6 @@ kubectl create configmap -n kylin-example kylin-query-config  \
     --from-file=../config/kylin-query/setenv-tool.sh \
     --dry-run -o yaml | kubectl apply -f -
 
-kubectl create configmap -n kylin-example kylin-receiver-config  \
-    --from-file=../config/streaming-receiver/kylin.properties \
-    --from-file=../config/streaming-receiver/setenv.sh \
-    --dry-run -o yaml | kubectl apply -f -
-
 kubectl create configmap -n kylin-example filebeat-config  \
     --from-file=../config/filebeat/filebeat.yml \
     --dry-run -o yaml | kubectl apply -f -
@@ -87,26 +77,17 @@ kubectl create configmap -n kylin-example tomcat-config  \
     --from-file=../config/tomcat/context.xml \
     --dry-run -o yaml | kubectl apply -f -
 
-### Prepare memcached service
-
-kubectl create -f deployment/memcached/memcached-service.yaml
-kubectl create -f deployment/memcached/memcached-statefulset.yaml
-
-
-### Prepare kylin service
-
-
-## Create headless serivce
-kubectl create -f deployment/kylin/kylin-service.yaml
-
-## Create statefulset
-kubectl create -f deployment/kylin/kylin-all-statefulset.yaml
-kubectl create -f deployment/kylin/kylin-job-statefulset.yaml
-
-#kubectl create -f deployment/kylin/kylin-query-statefulset.yaml
-#kubectl create -f deployment/kylin/kylin-receiver-statefulset.yaml
-
-kubectl delete -f deployment/kylin/kylin-all-statefulset.yaml
-kubectl delete -f deployment/kylin/kylin-job-statefulset.yaml
+## Step3. Deploy all service
+kubectl apply -f memcached
+kubectl apply -f kylin-job
+kubectl apply -f kylin-query
 
+## Step4. Check state
+kubectl get pod -n kylin-example
+kubectl get service -n kylin-example
+kubectl get statefulset -n kylin-example
 
+## Step5. Delete all (kylin + memcached) cluster
+kubectl delete -f memcached
+kubectl delete -f kylin-job
+kubectl delete -f kylin-query
\ No newline at end of file
diff --git a/kubernetes/config/production/kylin-all/kylin-server-log4j.properties b/kubernetes/template/production/example/deployment/kylin-job/kylin-job-service.yaml
similarity index 62%
rename from kubernetes/config/production/kylin-all/kylin-server-log4j.properties
rename to kubernetes/template/production/example/deployment/kylin-job/kylin-job-service.yaml
index 5dab102..eb6c111 100644
--- a/kubernetes/config/production/kylin-all/kylin-server-log4j.properties
+++ b/kubernetes/template/production/example/deployment/kylin-job/kylin-job-service.yaml
@@ -15,16 +15,22 @@
 # limitations under the License.
 #
 
-
-#define appenders
-log4j.appender.file=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.file.layout=org.apache.log4j.PatternLayout
-log4j.appender.file.File=${catalina.home}/../logs/kylin.log
-log4j.appender.file.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}:%L : %m%n
-log4j.appender.file.Append=true
-
-#overall config
-log4j.rootLogger=INFO,file
-log4j.logger.org.apache.kylin=DEBUG
-log4j.logger.org.springframework=WARN
-log4j.logger.org.springframework.security=INFO
\ No newline at end of file
+apiVersion: v1
+kind: Service
+metadata:
+  name: kylin-job-svc
+  namespace: kylin-example
+spec:
+  ports:
+  - name: http
+    port: 80
+    targetPort: 7070
+    nodePort: 30010
+  - name: https
+    port: 443
+    targetPort: 7443
+    nodePort: 30011
+  selector:
+    app: kylin
+    role: job
+  type: NodePort # LoadBalancer
\ No newline at end of file
diff --git a/kubernetes/example/deployment/kylin-job/kylin-job-statefulset.yaml b/kubernetes/template/production/example/deployment/kylin-job/kylin-job-statefulset.yaml
similarity index 75%
rename from kubernetes/example/deployment/kylin-job/kylin-job-statefulset.yaml
rename to kubernetes/template/production/example/deployment/kylin-job/kylin-job-statefulset.yaml
index 85e70ec..f85bc44 100644
--- a/kubernetes/example/deployment/kylin-job/kylin-job-statefulset.yaml
+++ b/kubernetes/template/production/example/deployment/kylin-job/kylin-job-statefulset.yaml
@@ -1,3 +1,20 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
 apiVersion: apps/v1
 kind: StatefulSet
 metadata:
@@ -11,7 +28,7 @@ metadata:
     hadoop.version: cdh5.7
 spec:
   serviceName: kylin-job-svc
-  replicas: 2
+  replicas: 1
   selector:
     matchLabels:
       app: kylin
@@ -22,19 +39,6 @@ spec:
         app: kylin
         role: job
     spec:
-      hostAliases:
-        - ip: "10.1.3.90"
-          hostnames:
-            - "cdh-master"
-        - ip: "10.1.3.92"
-          hostnames:
-            - "cdh-worker-1"
-        - ip: "10.1.3.93"
-          hostnames:
-            - "cdh-worker-2"
-        - ip: "10.1.3.91"
-          hostnames:
-            - "cdh-client"
       containers:
         - name: kylin
           image: kylin-client:3.0.1-cdh57
@@ -69,10 +73,10 @@ spec:
           resources:
             requests:
               memory: 2Gi
-              cpu: 1100m
+              cpu: 1000m
             limits:
               memory: 2Gi
-              cpu: 1100m
+              cpu: 1000m
         - name: filebeat
           image: docker.elastic.co/beats/filebeat:6.4.3
           args:
@@ -90,10 +94,10 @@ spec:
           resources:
             requests:
               memory: 1Gi
-              cpu: 400m
+              cpu: 300m
             limits:
               memory: 1Gi
-              cpu: 400m
+              cpu: 300m
       volumes:
         - name: kylin-logs
           emptyDir:
@@ -121,4 +125,6 @@ spec:
           name: filebeat-config
         - configMap:
             name: kylin-more-config
-          name: kylin-more-config
\ No newline at end of file
+          name: kylin-more-config
+  updateStrategy:
+    type: RollingUpdate
\ No newline at end of file
diff --git a/kubernetes/example/deployment/kylin-query/kylin-query-statefulset.yaml b/kubernetes/template/production/example/deployment/kylin-query/kylin-query-statefulset.yaml
similarity index 56%
rename from kubernetes/example/deployment/kylin-query/kylin-query-statefulset.yaml
rename to kubernetes/template/production/example/deployment/kylin-query/kylin-query-statefulset.yaml
index 49cee4f..971f881 100644
--- a/kubernetes/example/deployment/kylin-query/kylin-query-statefulset.yaml
+++ b/kubernetes/template/production/example/deployment/kylin-query/kylin-query-statefulset.yaml
@@ -1,3 +1,20 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
 apiVersion: apps/v1
 kind: StatefulSet
 metadata:
@@ -11,7 +28,7 @@ metadata:
     hadoop.version: cdh5.7
 spec:
   serviceName: kylin-query-svc
-  replicas: 2
+  replicas: 1
   selector:
     matchLabels:
       app: kylin
@@ -31,29 +48,35 @@ spec:
         - -c
         args:
         - cp $KYLIN_HOME/tomcat-config/* $KYLIN_HOME/tomcat/conf;
-          cp $KYLIN_HOME/kylin-more-config $KYLIN_HOME/tomcat/webapps/kylin/WEB-INF/classes/
+          cp $KYLIN_HOME/kylin-more-config $KYLIN_HOME/tomcat/webapps/kylin/WEB-INF/classes/;
           $TOOL_HOME/bootstrap.sh server -d;
         ports:
         - containerPort: 7070
         - containerPort: 7443
         volumeMounts:
-        - name: kylin-query-config
-          mountPath: /home/apache_kylin/kylin/conf
-        - name: tomcat-config
-          mountPath: /home/apache_kylin/kylin/tomcat-config
-        - name: kylin-more-config
-          mountPath: /home/apache_kylin/kylin/kylin-more-config/
-        - name: kylin-logs
-          mountPath: /home/apache_kylin/kylin/logs
-        - name: tomcat-logs
-          mountPath: /home/apache_kylin/kylin/tomcat/logs
+          - name: kylin-query-config
+            mountPath: /home/apache_kylin/kylin/conf
+          - name: tomcat-config
+            mountPath: /home/apache_kylin/kylin/tomcat-config
+          - name: kylin-more-config
+            mountPath: /home/apache_kylin/kylin/kylin-more-config/
+          - name: hadoop-config
+            mountPath: /etc/hadoop/conf
+          - name: hive-config
+            mountPath: /etc/hive/conf
+          - name: hbase-config
+            mountPath: /etc/hbase/conf
+          - name: kylin-logs
+            mountPath: /home/apache_kylin/kylin/logs
+          - name: tomcat-logs
+            mountPath: /home/apache_kylin/kylin/tomcat/logs
         resources:
           requests:
             memory: 2Gi
-            cpu: 1100m
+            cpu: 700m
           limits:
             memory: 2Gi
-            cpu: 1100m
+            cpu: 700m
       - name: filebeat
         image: docker.elastic.co/beats/filebeat:6.4.3
         args:
@@ -71,10 +94,10 @@ spec:
         resources:
           requests:
             memory: 1Gi
-            cpu: 400m
+            cpu: 300m
           limits:
             memory: 1Gi
-            cpu: 400m
+            cpu: 300m
       volumes:
       - name: kylin-logs
         emptyDir:
@@ -102,4 +125,6 @@ spec:
         name: filebeat-config
       - configMap:
           name: kylin-more-config
-        name: kylin-more-config
\ No newline at end of file
+        name: kylin-more-config
+  updateStrategy:
+    type: RollingUpdate
\ No newline at end of file
diff --git a/kubernetes/example/config/kylin-job/kylin-server-log4j.properties b/kubernetes/template/production/example/deployment/memcached/memcached-service.yaml
similarity index 62%
rename from kubernetes/example/config/kylin-job/kylin-server-log4j.properties
rename to kubernetes/template/production/example/deployment/memcached/memcached-service.yaml
index 5dab102..87d0135 100644
--- a/kubernetes/example/config/kylin-job/kylin-server-log4j.properties
+++ b/kubernetes/template/production/example/deployment/memcached/memcached-service.yaml
@@ -15,16 +15,16 @@
 # limitations under the License.
 #
 
-
-#define appenders
-log4j.appender.file=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.file.layout=org.apache.log4j.PatternLayout
-log4j.appender.file.File=${catalina.home}/../logs/kylin.log
-log4j.appender.file.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}:%L : %m%n
-log4j.appender.file.Append=true
-
-#overall config
-log4j.rootLogger=INFO,file
-log4j.logger.org.apache.kylin=DEBUG
-log4j.logger.org.springframework=WARN
-log4j.logger.org.springframework.security=INFO
\ No newline at end of file
+apiVersion: v1
+kind: Service
+metadata:
+  name: cache-svc
+  namespace: kylin-example
+spec:
+  clusterIP: None
+  selector: 
+    app: kylin-memcached
+    role: cache
+  ports:
+  - port: 11211
+    targetPort: 11211
\ No newline at end of file
diff --git a/kubernetes/template/production/example/deployment/memcached/memcached-statefulset.yaml b/kubernetes/template/production/example/deployment/memcached/memcached-statefulset.yaml
new file mode 100644
index 0000000..90563c8
--- /dev/null
+++ b/kubernetes/template/production/example/deployment/memcached/memcached-statefulset.yaml
@@ -0,0 +1,59 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: kylin-memcached
+  namespace: kylin-example
+spec:
+  serviceName: cache-svc
+  replicas: 1
+  selector:
+    matchLabels:
+      app: kylin-memcached
+      role: cache
+  template:
+    metadata:
+      labels:
+        app: kylin-memcached
+        role: cache
+    spec:
+      containers:
+      - image: memcached:1.4.39
+        name: memcached
+        ports:
+        - containerPort: 11211
+        args:
+        - "-m 20480"
+        resources:
+          limits:
+            cpu: 700m
+            memory: 1Gi
+          requests:
+            cpu: 700m
+            memory: 1Gi
+        livenessProbe:
+          tcpSocket:
+            port: 11211
+          initialDelaySeconds: 30
+          timeoutSeconds: 5
+        readinessProbe:
+          tcpSocket:
+            port: 11211
+          initialDelaySeconds: 5
+          timeoutSeconds: 1
\ No newline at end of file
diff --git a/kubernetes/template/quickstart/check-cluster.sh b/kubernetes/template/quickstart/check-cluster.sh
deleted file mode 100644
index 7b63295..0000000
--- a/kubernetes/template/quickstart/check-cluster.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-## Check status
-kubectl get statefulset -n kylin-quickstart
-kubectl get pod -n kylin-quickstart --output=yaml
-kubectl get service -n kylin-quickstart --output=yaml
-
-## Check detail
-kubectl describe statefulset kylin-all -n kylin-quickstart
\ No newline at end of file
diff --git a/kubernetes/template/quickstart/cleanup.sh b/kubernetes/template/quickstart/cleanup.sh
deleted file mode 100644
index 3459103..0000000
--- a/kubernetes/template/quickstart/cleanup.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-kubectl delete -f deployment/kylin/kylin-serivce.yaml
-kubectl delete -f deployment/kylin/kylin-all-statefulset.yaml
\ No newline at end of file
diff --git a/kubernetes/template/quickstart/deploy-kylin.sh b/kubernetes/template/quickstart/deploy-kylin.sh
index 2607224..a17540d 100644
--- a/kubernetes/template/quickstart/deploy-kylin.sh
+++ b/kubernetes/template/quickstart/deploy-kylin.sh
@@ -1,12 +1,25 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
 ## create namespace
 kubectl create namespace kylin-quickstart
 
-## Prepare hadoop configuration
-
-
-## Prepare kylin configuration
-
-
 ## Create configmap
 kubectl create configmap -n kylin-quickstart hadoop-config \
     --from-file=../../config/quickstart/hadoop/core-site.xml \
@@ -36,8 +49,5 @@ kubectl create configmap -n kylin-quickstart kylin-config  \
     --from-file=../../config/quickstart/kylin/setenv-tool.sh \
     --dry-run -o yaml | kubectl apply -f -
 
-## Create headless serivce
-kubectl delete -f deployment/kylin/kylin-service.yaml
-
-## Create statefulset
-kubectl create -f deployment/kylin/kylin-all-statefulset.yaml
\ No newline at end of file
+## Create kylin service
+kubectl apply -f deployment/kylin/
\ No newline at end of file
diff --git a/kubernetes/template/quickstart/deployment/kylin/kylin-all-statefulset.yaml b/kubernetes/template/quickstart/deployment/kylin/kylin-all-statefulset.yaml
index 461f2fe..e120285 100644
--- a/kubernetes/template/quickstart/deployment/kylin/kylin-all-statefulset.yaml
+++ b/kubernetes/template/quickstart/deployment/kylin/kylin-all-statefulset.yaml
@@ -1,48 +1,47 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
 apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   name: kylin-all
   namespace: kylin-quickstart
+  labels:
+    app: kylin
+    role: all
+    environment: test
+    version: {YOUR_VERSION}
+    hadoop.version: {YOUR_VERSION}
 spec:
   serviceName: kylin-svc
   replicas: 1
   selector:
-    matchExpressions:
-      - key: app
-        operator: In
-        values:
-          - kylin
-      - key: query
-        operator: In
-        values:
-          - "true"
-      - key: job
-        operator: In
-        values:
-          - "true"
+    matchLabels:
+      app: kylin
+      role: all
   template:
     metadata:
       labels:
         app: kylin
-        query: "true"
-        job: "true"
+        role: job
     spec:
-      hostAliases:
-        - ip: "10.1.3.90"
-          hostnames:
-            - "cdh-master"
-        - ip: "10.1.3.92"
-          hostnames:
-            - "cdh-worker-1"
-        - ip: "10.1.3.93"
-          hostnames:
-            - "cdh-worker-2"
-        - ip: "10.1.3.91"
-          hostnames:
-            - "cdh-client"
       containers:
         - name: kylin
-          image: kylin-client:3.0.1-cdh57
+          image: kylin-client:{YOUR_VERSION}
           imagePullPolicy: IfNotPresent
           command:
             - sh
diff --git a/kubernetes/template/quickstart/deployment/kylin/kylin-service.yaml b/kubernetes/template/quickstart/deployment/kylin/kylin-service.yaml
index f237133..607e26c 100644
--- a/kubernetes/template/quickstart/deployment/kylin/kylin-service.yaml
+++ b/kubernetes/template/quickstart/deployment/kylin/kylin-service.yaml
@@ -1,3 +1,20 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
 apiVersion: v1
 kind: Service
 metadata:
@@ -8,10 +25,12 @@ spec:
   - name: http
     port: 80
     targetPort: 7070
+    nodePort: 30002
   - name: https
     port: 443
     targetPort: 7443
+    nodePort: 30003
   selector:
     app: kylin
-    query: "true"
-  type: LoadBalancer
\ No newline at end of file
+    role: all
+  type: NodePort # LoadBalancer
\ No newline at end of file


[kylin] 01/04: KYLIN-4181 Schedule Kylin using Kubernetes

Posted by xx...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

xxyu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kylin.git

commit 0fdf7764a2dc3c5f6d025580a16d397b32fffc7c
Author: Temple Zhou <db...@gmail.com>
AuthorDate: Sun Apr 26 13:04:32 2020 +0800

    KYLIN-4181 Schedule Kylin using Kubernetes
---
 .gitignore                              |   4 +-
 kubernetes/Dockerfile                   |  78 +++++++++++++++++++++++
 kubernetes/README.md                    | 109 ++++++++++++++++++++++++++++++++
 kubernetes/kylin-configmap.sh           |  17 +++++
 kubernetes/kylin-job-statefulset.yaml   |  95 ++++++++++++++++++++++++++++
 kubernetes/kylin-query-statefulset.yaml |  95 ++++++++++++++++++++++++++++
 kubernetes/kylin-secret.sh              |   3 +
 kubernetes/kylin-service.yaml           |  44 +++++++++++++
 8 files changed, 444 insertions(+), 1 deletion(-)

diff --git a/.gitignore b/.gitignore
index 98da29c..69d61d0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -93,4 +93,6 @@ dependency-reduced-pom.xml
 webapp/package-lock.json
 
 # stream_index
-stream-receiver/stream_index
\ No newline at end of file
+stream-receiver/stream_index
+# configuration files
+kubernetes/conf/*
diff --git a/kubernetes/Dockerfile b/kubernetes/Dockerfile
new file mode 100644
index 0000000..6454417
--- /dev/null
+++ b/kubernetes/Dockerfile
@@ -0,0 +1,78 @@
+FROM centos:6.9
+
+ARG APACHE_MIRRORS=http://mirrors.aliyun.com
+ENV APACHE_MIRRORS  ${APACHE_MIRRORS}
+
+ENV JAVA_VERSION    1.8.0
+ENV SPARK_VERSION   2.3.4
+ENV KAFKA_VERSION   2.1.1
+ENV KYLIN_VERSION   3.0.0
+
+ENV JAVA_HOME       /usr/lib/jvm/java-${JAVA_VERSION}
+ENV HADOOP_HOME     /usr/lib/hadoop
+ENV HIVE_HOME       /usr/lib/hive
+ENV HCAT_HOME       /usr/lib/hive-hcatalog
+ENV HBASE_HOME      /usr/lib/hbase
+ENV SPARK_HOME      /opt/spark-${SPARK_VERSION}-bin-hadoop2.6
+ENV KAFKA_HOME      /opt/kafka_2.11-${KAFKA_VERSION}
+ENV KYLIN_HOME      /opt/apache-kylin-${KYLIN_VERSION}-bin-cdh57
+
+ENV PATH $PATH:\
+$SPARK_HOME/bin:\
+$KAFKA_HOME/bin:\
+$KYLIN_HOME/bin
+
+ENV HADOOP_CONF_DIR  /etc/hadoop/conf
+ENV HIVE_CONF_DIR    /etc/hive/conf
+ENV HBASE_CONF_DIR   /etc/hbase/conf
+ENV HIVE_CONF        ${HIVE_CONF_DIR}
+ENV HIVE_LIB         ${HIVE_HOME}/lib
+
+RUN echo $'[cloudera-cdh5] \n\
+# Packages for Cloudera\'s Distribution for Hadoop, Version 5, on RedHat or CentOS 6 x86_64 \n\
+name=Cloudera\'s Distribution for Hadoop, Version 5 \n\
+baseurl=https://archive.cloudera.com/cdh5/redhat/6/x86_64/cdh/5.7.6/ \n\
+gpgkey =https://archive.cloudera.com/cdh5/redhat/6/x86_64/cdh/RPM-GPG-KEY-cloudera \n\
+gpgcheck = 1' > /etc/yum.repos.d/cloudera-cdh5.repo
+
+WORKDIR /opt
+
+# Download Kafka from APACHE_MIRRORS
+RUN set -xeu && \
+    curl -o kafka_2.11-${KAFKA_VERSION}.tgz \
+    ${APACHE_MIRRORS}/apache/kafka/${KAFKA_VERSION}/kafka_2.11-${KAFKA_VERSION}.tgz && \
+    tar -zxf kafka_2.11-${KAFKA_VERSION}.tgz && rm kafka_2.11-${KAFKA_VERSION}.tgz
+
+# Download Spark from APACHE_MIRRORS
+RUN set -xeu && \
+    curl -o spark-${SPARK_VERSION}-bin-hadoop2.6.tgz \
+    ${APACHE_MIRRORS}/apache/spark/spark-${SPARK_VERSION}/spark-${SPARK_VERSION}-bin-hadoop2.6.tgz && \
+    tar -zxf spark-${SPARK_VERSION}-bin-hadoop2.6.tgz && rm spark-${SPARK_VERSION}-bin-hadoop2.6.tgz
+
+# Download Kylin from APACHE_MIRRORS
+RUN set -xeu && \
+    curl -o apache-kylin-${KYLIN_VERSION}-bin-cdh57.tar.gz \
+    ${APACHE_MIRRORS}/apache/kylin/apache-kylin-${KYLIN_VERSION}/apache-kylin-${KYLIN_VERSION}-bin-cdh57.tar.gz && \
+    tar -zxf apache-kylin-${KYLIN_VERSION}-bin-cdh57.tar.gz && rm apache-kylin-${KYLIN_VERSION}-bin-cdh57.tar.gz
+
+# Setup Hadoop & Hive & HBase using CDH Repository. PS: The libhadoop.so provided by CDH is compiled with snappy
+RUN set -xeu && \
+    yum -y -q install java-1.8.0-openjdk-devel && \
+    yum -y -q install krb5-workstation && \
+    yum -y -q install hadoop-client && \
+    yum -y -q install hive hive-hcatalog && \
+    yum -y -q install hbase && \
+    curl -o ${HIVE_HOME}/lib/hadoop-lzo-0.4.15.jar \
+    https://clojars.org/repo/hadoop-lzo/hadoop-lzo/0.4.15/hadoop-lzo-0.4.15.jar && \
+    curl -o ${HIVE_HOME}/lib/mysql-connector-java-5.1.24.jar \
+    https://repo1.maven.org/maven2/mysql/mysql-connector-java/5.1.24/mysql-connector-java-5.1.24.jar && \
+    yum -q clean all && \
+    rm -rf /var/cache/yum && \
+    rm -rf /tmp/* /var/tmp/* && \
+    groupadd kylin --gid 1000 && \
+    useradd kylin --uid 1000 --gid 1000 && \
+    chown -R "kylin:kylin" ${KYLIN_HOME}
+
+EXPOSE 7070
+USER kylin:kylin
+CMD ${KYLIN_HOME}/bin/kylin.sh run
\ No newline at end of file
diff --git a/kubernetes/README.md b/kubernetes/README.md
new file mode 100644
index 0000000..205e3f3
--- /dev/null
+++ b/kubernetes/README.md
@@ -0,0 +1,109 @@
+# Kubernetes QuickStart
+
+This guide shows how to run Kylin cluster using Kubernetes StatefulSet Controller. The following figure depicts a typical scenario for Kylin cluster mode deployment:
+
+![image_name](http://kylin.apache.org/images/install/kylin_server_modes.png)
+
+## Build or Pull Docker Image
+
+You can pull the image from Docker Hub directly if you do not want to build the image locally:
+
+```bash
+docker pull apachekylin/apache-kylin:3.0.0-cdh57
+```
+
+TIPS: If you are working with an air-gapped network or slow internet speeds, we suggest you prepare the binary packages by yourself and execute this:
+
+```bash
+docker build -t "apache-kylin:${KYLIN_VERSION}-cdh57" --build-arg APACHE_MIRRORS=http://127.0.0.1:8000 .
+```
+
+## Prepare your Hadoop Configuration
+
+Put all of the configuration files under the "conf" directory.
+
+```bash
+kylin.properties
+applicationContext.xml  # If you need to set cacheManager to Memcached
+hbase-site.xml
+hive-site.xml
+hdfs-site.xml
+core-site.xml
+mapred-site.xml
+yarn-site.xml
+```
+
+If you worked with Kerberized Hadoop Cluster, do not forget to prepare the following files:
+
+```bash
+krb5.conf
+kylin.keytab
+```
+
+## Create ConfigMaps and Secret
+
+We recommend you create a separate Kubernetes namespace for Kylin.
+
+```bash
+kubectl create namespace kylin
+```
+
+Execute the following shell scripts to create the required ConfigMaps:
+
+```bash
+./kylin-configmap.sh
+./kylin-secret.sh
+```
+
+## Create Service and StatefulSet
+
+Make sure the following resources exist in your namespace:
+
+```bash
+kubectl get configmaps,secret -n kylin
+
+NAME                      DATA   AGE
+configmap/hadoop-config   4      89d
+configmap/hbase-config    1      89d
+configmap/hive-config     1      89d
+configmap/krb5-config     1      89d
+configmap/kylin-config    1      89d
+configmap/kylin-context   1      45d
+
+NAME                         TYPE                                  DATA   AGE
+secret/kylin-keytab          Opaque                                1      89d
+
+```
+
+Then, you need to create a headless service for stable DNS entries (kylin-0.kylin, kylin-1.kylin, kylin-2.kylin...) of StatefulSet members.
+
+```bash
+kubectl apply -f kylin-service.yaml
+```
+
+Finally, create the StatefulSet and try to use it:
+
+```bash
+kubectl apply -f kylin-job-statefulset.yaml
+kubectl apply -f kylin-query-statefulset.yaml
+```
+
+If everything goes smoothly, you should see all 3 Pods become Running:
+
+```bash
+kubectl get statefulset,pod,service -n kylin
+
+NAME                           READY   AGE
+statefulset.apps/kylin-job     1/1     36d
+statefulset.apps/kylin-query   3/3     36d
+
+NAME                READY   STATUS    RESTARTS   AGE
+pod/kylin-job-0     1/1     Running   0          13m
+pod/kylin-query-0   1/1     Running   0          40h
+pod/kylin-query-1   1/1     Running   0          40h
+
+NAME                  TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
+service/kylin         ClusterIP   None             <none>        7070/TCP   58d
+service/kylin-job     ClusterIP   xx.xxx.xx.xx     <none>        7070/TCP   89d
+service/kylin-query   ClusterIP   xx.xxx.xxx.xxx   <none>        7070/TCP   89d
+```
diff --git a/kubernetes/kylin-configmap.sh b/kubernetes/kylin-configmap.sh
new file mode 100755
index 0000000..b8ec1b9
--- /dev/null
+++ b/kubernetes/kylin-configmap.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+
+kubectl create configmap -n kylin hadoop-config --from-file=conf/core-site.xml \
+                                                --from-file=conf/hdfs-site.xml \
+                                                --from-file=conf/yarn-site.xml \
+                                                --from-file=conf/mapred-site.xml \
+                                                --dry-run -o yaml | kubectl apply -f -
+kubectl create configmap -n kylin hive-config   --from-file=conf/hive-site.xml \
+                                                --dry-run -o yaml | kubectl apply -f -
+kubectl create configmap -n kylin hbase-config  --from-file=conf/hbase-site.xml \
+                                                --dry-run -o yaml | kubectl apply -f -
+kubectl create configmap -n kylin kylin-config  --from-file=conf/kylin.properties \
+                                                --dry-run -o yaml | kubectl apply -f -
+kubectl create configmap -n kylin krb5-config   --from-file=conf/krb5.conf \
+                                                --dry-run -o yaml | kubectl apply -f -
+kubectl create configmap -n kylin kylin-context --from-file=conf/applicationContext.xml \
+                                                --dry-run -o yaml | kubectl apply -f -
diff --git a/kubernetes/kylin-job-statefulset.yaml b/kubernetes/kylin-job-statefulset.yaml
new file mode 100644
index 0000000..2a0f9fe
--- /dev/null
+++ b/kubernetes/kylin-job-statefulset.yaml
@@ -0,0 +1,95 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  annotations: {}
+  name: kylin-job
+  namespace: kylin
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: kylin
+      type: job
+  serviceName: kylin
+  template:
+    metadata:
+      labels:
+        app: kylin
+        type: job
+    spec:
+      containers:
+        - image: 'apachekylin/apache-kylin:3.0.0-cdh57'
+          imagePullPolicy: Always
+          lifecycle:
+            postStart:
+              exec:
+                command:
+                  - bash
+                  - '-c'
+                  - |
+                    set -ex
+                    # initialize the keytab
+                    kinit -kt /home/kylin/kylin.keytab kylin
+                    # set the kylin.server.mode
+                    sed "s/kylin\.server\.mode.*/kylin\.server\.mode=all/g" /mnt/kylin-config/kylin.properties > ${KYLIN_HOME}/conf/kylin.properties
+                    sed -i "s/kylin\.server\.host-address.*/kylin\.server\.host-address=`hostname`\.kylin:7070/g" ${KYLIN_HOME}/conf/kylin.properties
+                    sed -i "s/export KYLIN_JVM_SETTINGS.*/export KYLIN_JVM_SETTINGS=\"-Xms40g -Xmx40g -XX:NewSize=10g -XX:MaxNewSize=10g -XX:SurvivorRatio=3 -XX:+CMSClassUnloadingEnabled -XX:+CMSParallelRemarkEnabled -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode -XX:CMSInitiatingOccupancyFraction=70 -XX:+DisableExplicitGC -XX:+HeapDumpOnOutOfMemoryError\"/g" ${KYLIN_HOME}/conf/setenv.sh
+                    # unarchive the war file and replace the applicationContext if needed
+                    mkdir ${KYLIN_HOME}/tomcat/webapps/kylin
+                    cd ${KYLIN_HOME}/tomcat/webapps/kylin
+                    jar -xvf ${KYLIN_HOME}/tomcat/webapps/kylin.war
+                    cp /mnt/kylin-context/applicationContext.xml ${KYLIN_HOME}/tomcat/webapps/kylin/WEB-INF/classes
+          name: kylin
+          ports:
+            - containerPort: 7070
+          readinessProbe:
+            httpGet:
+              path: /kylin
+              port: 7070
+          resources:
+            limits:
+              cpu: 16
+              memory: 50G
+            requests:
+              cpu: 8
+              memory: 50G
+          volumeMounts:
+            - mountPath: /etc/hadoop/conf
+              name: hadoop-config
+            - mountPath: /etc/hive/conf
+              name: hive-config
+            - mountPath: /etc/hbase/conf
+              name: hbase-config
+            - mountPath: /home/kylin
+              name: kylin-keytab
+            - mountPath: /etc/krb5.conf
+              name: krb5-config
+              subPath: krb5.conf
+            - mountPath: /mnt/kylin-context
+              name: kylin-context
+            - mountPath: /mnt/kylin-config
+              name: kylin-config
+      volumes:
+        - configMap:
+            name: hadoop-config
+          name: hadoop-config
+        - configMap:
+            name: hive-config
+          name: hive-config
+        - configMap:
+            name: hbase-config
+          name: hbase-config
+        - configMap:
+            name: kylin-config
+          name: kylin-config
+        - configMap:
+            name: krb5-config
+          name: krb5-config
+        - configMap:
+            name: kylin-context
+          name: kylin-context
+        - name: kylin-keytab
+          secret:
+            secretName: kylin-keytab
+  updateStrategy:
+    type: RollingUpdate
diff --git a/kubernetes/kylin-query-statefulset.yaml b/kubernetes/kylin-query-statefulset.yaml
new file mode 100644
index 0000000..f504a58
--- /dev/null
+++ b/kubernetes/kylin-query-statefulset.yaml
@@ -0,0 +1,95 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  annotations: {}
+  name: kylin-query
+  namespace: kylin
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      app: kylin
+      type: query
+  serviceName: kylin
+  template:
+    metadata:
+      labels:
+        app: kylin
+        type: query
+    spec:
+      containers:
+        - image: 'apachekylin/apache-kylin:3.0.0-cdh57'
+          imagePullPolicy: Always
+          lifecycle:
+            postStart:
+              exec:
+                command:
+                  - bash
+                  - '-c'
+                  - |
+                    set -ex
+                    # initialize the keytab
+                    kinit -kt /home/kylin/kylin.keytab kylin
+                    # set the kylin.server.mode
+                    sed "s/kylin\.server\.mode.*/kylin\.server\.mode=query/g" /mnt/kylin-config/kylin.properties > ${KYLIN_HOME}/conf/kylin.properties
+                    sed -i "s/kylin\.server\.host-address.*/kylin\.server\.host-address=`hostname`\.kylin:7070/g" ${KYLIN_HOME}/conf/kylin.properties
+                    sed -i "s/export KYLIN_JVM_SETTINGS.*/export KYLIN_JVM_SETTINGS=\"-Xms16g -Xmx16g -XX:NewSize=3g -XX:MaxNewSize=3g -XX:SurvivorRatio=4 -XX:+CMSClassUnloadingEnabled -XX:+CMSParallelRemarkEnabled -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode -XX:CMSInitiatingOccupancyFraction=70 -XX:+DisableExplicitGC -XX:+HeapDumpOnOutOfMemoryError\"/g" ${KYLIN_HOME}/conf/setenv.sh
+                    # unarchive the war file and replace the applicationContext if needed
+                    mkdir ${KYLIN_HOME}/tomcat/webapps/kylin
+                    cd ${KYLIN_HOME}/tomcat/webapps/kylin
+                    jar -xvf ${KYLIN_HOME}/tomcat/webapps/kylin.war
+                    cp /mnt/kylin-context/applicationContext.xml ${KYLIN_HOME}/tomcat/webapps/kylin/WEB-INF/classes
+          name: kylin
+          ports:
+            - containerPort: 7070
+          readinessProbe:
+            httpGet:
+              path: /kylin
+              port: 7070
+          resources:
+            limits:
+              cpu: 8
+              memory: 20G
+            requests:
+              cpu: 8
+              memory: 20G
+          volumeMounts:
+            - mountPath: /etc/hadoop/conf
+              name: hadoop-config
+            - mountPath: /etc/hive/conf
+              name: hive-config
+            - mountPath: /etc/hbase/conf
+              name: hbase-config
+            - mountPath: /home/kylin
+              name: kylin-keytab
+            - mountPath: /etc/krb5.conf
+              name: krb5-config
+              subPath: krb5.conf
+            - mountPath: /mnt/kylin-context
+              name: kylin-context
+            - mountPath: /mnt/kylin-config
+              name: kylin-config
+      volumes:
+        - configMap:
+            name: hadoop-config
+          name: hadoop-config
+        - configMap:
+            name: hive-config
+          name: hive-config
+        - configMap:
+            name: hbase-config
+          name: hbase-config
+        - configMap:
+            name: kylin-config
+          name: kylin-config
+        - configMap:
+            name: krb5-config
+          name: krb5-config
+        - configMap:
+            name: kylin-context
+          name: kylin-context
+        - name: kylin-keytab
+          secret:
+            secretName: kylin-keytab
+  updateStrategy:
+    type: RollingUpdate
diff --git a/kubernetes/kylin-secret.sh b/kubernetes/kylin-secret.sh
new file mode 100755
index 0000000..87ab71e
--- /dev/null
+++ b/kubernetes/kylin-secret.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+kubectl create secret -n kylin generic kylin-keytab --from-file=conf/kylin.keytab
\ No newline at end of file
diff --git a/kubernetes/kylin-service.yaml b/kubernetes/kylin-service.yaml
new file mode 100644
index 0000000..50c3206
--- /dev/null
+++ b/kubernetes/kylin-service.yaml
@@ -0,0 +1,44 @@
+# Headless service for stable DNS entries of StatefulSet members.
+apiVersion: v1
+kind: Service
+metadata:
+  name: kylin
+  labels:
+    app: kylin
+spec:
+  ports:
+    - name: kylin
+      port: 7070
+  clusterIP: None
+  selector:
+    app: kylin
+---
+# For job instances.
+apiVersion: v1
+kind: Service
+metadata:
+  name: kylin-job
+spec:
+  type: ClusterIP
+  selector:
+    app: kylin
+    type: job
+  ports:
+    - protocol: TCP
+      port: 7070
+      targetPort: 7070
+---
+# For query instances.
+apiVersion: v1
+kind: Service
+metadata:
+  name: kylin-query
+spec:
+  type: ClusterIP
+  selector:
+    app: kylin
+    type: query
+  ports:
+    - protocol: TCP
+      port: 7070
+      targetPort: 7070
\ No newline at end of file