Posted to commits@hawq.apache.org by es...@apache.org on 2017/02/03 09:00:21 UTC

[19/50] [abbrv] incubator-hawq git commit: HAWQ-1248. Merge Dockerfiles for HAWQ Dev into HAWQ code base.

HAWQ-1248. Merge Dockerfiles for HAWQ Dev into HAWQ code base.


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/440ce595
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/440ce595
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/440ce595

Branch: refs/heads/2.1.0.0-incubating
Commit: 440ce595a2298ac9be16973f0c7c4c358ddb2cd0
Parents: c8be9f2
Author: Richard Guo <gu...@gmail.com>
Authored: Tue Jan 3 17:12:59 2017 +0800
Committer: Ruilong Huo <rh...@pivotal.io>
Committed: Fri Jan 13 19:16:28 2017 +0800

----------------------------------------------------------------------
 contrib/hawq-docker/Makefile                    | 222 ++++++++++++++
 contrib/hawq-docker/README.md                   |  97 +++++++
 .../centos6-docker/hawq-dev/Dockerfile          | 123 ++++++++
 .../centos6-docker/hawq-test/Dockerfile         |  40 +++
 .../hawq-test/conf/capacity-scheduler.xml       | 134 +++++++++
 .../hawq-test/conf/configuration.xsl            |  40 +++
 .../hawq-test/conf/container-executor.cfg       |   4 +
 .../centos6-docker/hawq-test/conf/core-site.xml |  24 ++
 .../hawq-test/conf/hadoop-env.cmd               |  92 ++++++
 .../centos6-docker/hawq-test/conf/hadoop-env.sh | 110 +++++++
 .../hawq-test/conf/hadoop-metrics.properties    |  75 +++++
 .../hawq-test/conf/hadoop-metrics2.properties   |  68 +++++
 .../hawq-test/conf/hadoop-policy.xml            | 226 ++++++++++++++
 .../centos6-docker/hawq-test/conf/hdfs-site.xml | 100 +++++++
 .../centos6-docker/hawq-test/conf/kms-acls.xml  | 135 +++++++++
 .../centos6-docker/hawq-test/conf/kms-env.sh    |  55 ++++
 .../hawq-test/conf/kms-log4j.properties         |  38 +++
 .../centos6-docker/hawq-test/conf/kms-site.xml  | 173 +++++++++++
 .../hawq-test/conf/log4j.properties             | 291 +++++++++++++++++++
 .../hawq-test/conf/mapred-env.cmd               |  20 ++
 .../centos6-docker/hawq-test/conf/mapred-env.sh |  27 ++
 .../hawq-test/conf/mapred-queues.xml.template   |  92 ++++++
 .../hawq-test/conf/mapred-site.xml.template     |  21 ++
 .../centos6-docker/hawq-test/conf/slaves        |   1 +
 .../hawq-test/conf/ssl-client.xml.example       |  80 +++++
 .../hawq-test/conf/ssl-server.xml.example       |  78 +++++
 .../centos6-docker/hawq-test/conf/yarn-env.cmd  |  60 ++++
 .../centos6-docker/hawq-test/entrypoint.sh      |  34 +++
 .../centos6-docker/hawq-test/start-hdfs.sh      |  39 +++
 .../centos7-docker/hawq-dev/Dockerfile          |  75 +++++
 .../centos7-docker/hawq-test/Dockerfile         |  40 +++
 .../hawq-test/conf/capacity-scheduler.xml       | 134 +++++++++
 .../hawq-test/conf/configuration.xsl            |  40 +++
 .../hawq-test/conf/container-executor.cfg       |   4 +
 .../centos7-docker/hawq-test/conf/core-site.xml |  24 ++
 .../hawq-test/conf/hadoop-env.cmd               |  92 ++++++
 .../centos7-docker/hawq-test/conf/hadoop-env.sh | 110 +++++++
 .../hawq-test/conf/hadoop-metrics.properties    |  75 +++++
 .../hawq-test/conf/hadoop-metrics2.properties   |  68 +++++
 .../hawq-test/conf/hadoop-policy.xml            | 226 ++++++++++++++
 .../centos7-docker/hawq-test/conf/hdfs-site.xml | 100 +++++++
 .../centos7-docker/hawq-test/conf/kms-acls.xml  | 135 +++++++++
 .../centos7-docker/hawq-test/conf/kms-env.sh    |  55 ++++
 .../hawq-test/conf/kms-log4j.properties         |  38 +++
 .../centos7-docker/hawq-test/conf/kms-site.xml  | 173 +++++++++++
 .../hawq-test/conf/log4j.properties             | 291 +++++++++++++++++++
 .../hawq-test/conf/mapred-env.cmd               |  20 ++
 .../centos7-docker/hawq-test/conf/mapred-env.sh |  27 ++
 .../hawq-test/conf/mapred-queues.xml.template   |  92 ++++++
 .../hawq-test/conf/mapred-site.xml.template     |  21 ++
 .../centos7-docker/hawq-test/conf/slaves        |   1 +
 .../hawq-test/conf/ssl-client.xml.example       |  80 +++++
 .../hawq-test/conf/ssl-server.xml.example       |  78 +++++
 .../centos7-docker/hawq-test/conf/yarn-env.cmd  |  60 ++++
 .../centos7-docker/hawq-test/entrypoint.sh      |  33 +++
 .../centos7-docker/hawq-test/start-hdfs.sh      |  39 +++
 56 files changed, 4630 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/Makefile
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/Makefile b/contrib/hawq-docker/Makefile
new file mode 100644
index 0000000..120ebe2
--- /dev/null
+++ b/contrib/hawq-docker/Makefile
@@ -0,0 +1,222 @@
+#!/usr/bin/make all
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+THIS_MAKEFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
+TOP_DIR := $(abspath $(dir ${THIS_MAKEFILE_PATH}))
+NDATANODES := 3
+CUR_DATANODE := 1
+OS_VERSION := centos7
+# Do not use underscore "_" in CLUSTER_ID
+CLUSTER_ID := $(OS_VERSION)
+# Mount this local directory to /data in the data container and share it with other containers
+LOCAL := 
+# network used in docker
+NETWORK := $(CLUSTER_ID)_hawq_network
+
+all: 
+	@echo " Usage:"
+	@echo "    To setup a build and test environment:         make run"
+	@echo "    To start all containers:                       make start"
+	@echo "    To stop all containers:                        make stop"
+	@echo "    To remove hdfs containers:                     make clean"
+	@echo "    To remove all containers:                      make distclean"
+	@echo ""
+	@echo "    To build images locally:                       make build"
+	@echo "    To pull latest images:                         make pull"
+
+build:
+	@make -f $(THIS_MAKEFILE_PATH) build-hawq-dev-$(OS_VERSION)
+	@make -f $(THIS_MAKEFILE_PATH) build-hawq-test-$(OS_VERSION)
+	@echo "Build Images Done!"
+
+build-hawq-dev-$(OS_VERSION): $(TOP_DIR)/$(OS_VERSION)-docker/hawq-dev/Dockerfile
+	@echo build hawq-dev:$(OS_VERSION) image
+	docker build -t hawq/hawq-dev:$(OS_VERSION) $(TOP_DIR)/$(OS_VERSION)-docker/hawq-dev/
+
+build-hawq-test-$(OS_VERSION): $(TOP_DIR)/$(OS_VERSION)-docker/hawq-test/Dockerfile
+	@echo build hawq-test:$(OS_VERSION) image
+	docker build -t hawq/hawq-test:$(OS_VERSION) $(TOP_DIR)/$(OS_VERSION)-docker/hawq-test/
+
+create-data-container:
+	@echo create ${CLUSTER_ID}-data container
+	@if [ ! -z "$(LOCAL)" -a ! -d "$(LOCAL)" ]; then \
+		echo "LOCAL must be set to a directory!"; \
+		exit 1; \
+	fi
+	@if [ -z "`docker ps -a --filter="name=${CLUSTER_ID}-data$$" | grep -v CONTAINER`" ]; then \
+		if [ -z "$(LOCAL)" ]; then \
+			docker create -v /data --name=${CLUSTER_ID}-data hawq/hawq-dev:$(OS_VERSION) /bin/true; \
+		else \
+			docker create -v $(LOCAL):/data --name=${CLUSTER_ID}-data hawq/hawq-dev:$(OS_VERSION) /bin/true; \
+		fi \
+	else \
+		echo "${CLUSTER_ID}-data container already exist!"; \
+	fi
+
+run:
+	@if [ -z "`docker network ls 2>/dev/null`" ]; then \
+ 		make -f $(THIS_MAKEFILE_PATH) NETWORK=default create-data-container && \
+		make -f $(THIS_MAKEFILE_PATH) NETWORK=default run-hdfs; \
+	else \
+		if [ -z "`docker network ls 2>/dev/null | grep $(NETWORK)`" ]; then \
+			echo create network $(NETWORK) && \
+			docker network create --driver bridge $(NETWORK); \
+		fi && \
+		make -f $(THIS_MAKEFILE_PATH) create-data-container && \
+		make -f $(THIS_MAKEFILE_PATH) run-hdfs; \
+	fi
+
+run-hdfs:
+	@make -f $(THIS_MAKEFILE_PATH) run-namenode-container
+	@i=1; \
+	while [ $$i -le $(NDATANODES) ] ; do \
+		make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i run-datanode-container; \
+		i=$$((i+1)); \
+	done
+	@echo "HAWQ Environment Setup Done!"
+	@echo 'run "docker exec -it ${CLUSTER_ID}-namenode bash" to attach to ${CLUSTER_ID}-namenode node'
+
+run-namenode-container:
+	@echo "run ${CLUSTER_ID}-namenode container"
+	@if [ -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
+		docker run --privileged -itd --net=$(NETWORK) --hostname=${CLUSTER_ID}-namenode --name=${CLUSTER_ID}-namenode \
+			--volumes-from ${CLUSTER_ID}-data hawq/hawq-test:$(OS_VERSION); \
+	else \
+		echo "${CLUSTER_ID}-namenode container already exist!"; \
+	fi
+
+run-datanode-container:
+	@echo "run ${CLUSTER_ID}-datanode$(CUR_DATANODE) container"
+	@if [ -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \
+		docker run --privileged -itd --net=$(NETWORK) --hostname=${CLUSTER_ID}-datanode$(CUR_DATANODE) \
+			--name=${CLUSTER_ID}-datanode$(CUR_DATANODE) -e NAMENODE=${CLUSTER_ID}-namenode \
+			--volumes-from ${CLUSTER_ID}-data hawq/hawq-test:$(OS_VERSION); \
+	else \
+		echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container already exist!"; \
+	fi
+
+start:
+	@make -f $(THIS_MAKEFILE_PATH) start-hdfs
+	@echo 'run "docker exec -it ${CLUSTER_ID}-namenode bash" to attach to ${CLUSTER_ID}-namenode node'
+
+start-hdfs:
+	@make -f $(THIS_MAKEFILE_PATH) start-namenode-container
+	@i=1;\
+	while [ $$i -le $(NDATANODES) ] ; do \
+		make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i start-datanode-container; \
+		i=$$((i+1)); \
+	done
+	@echo "Start All Containers Done!"
+
+start-namenode-container:
+	@echo "start ${CLUSTER_ID}-namenode container"
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
+		docker start ${CLUSTER_ID}-namenode; \
+	else \
+		echo "${CLUSTER_ID}-namenode container does not exist!" && exit 1; \
+	fi
+
+start-datanode-container:
+	@echo "start ${CLUSTER_ID}-datanode$(CUR_DATANODE) container"
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \
+		docker start ${CLUSTER_ID}-datanode$(CUR_DATANODE); \
+	else \
+		echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container does not exist!" && exit 1; \
+	fi
+
+stop:
+	@make -f $(THIS_MAKEFILE_PATH) stop-hdfs
+
+stop-hdfs:
+	@make -f $(THIS_MAKEFILE_PATH) stop-namenode-container
+	@i=1; \
+	while [ $$i -le $(NDATANODES) ] ; do \
+		make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i stop-datanode-container; \
+		i=$$((i+1)); \
+	done
+	@echo "Stop All Containers Done!"
+
+stop-namenode-container:
+	@echo "stop ${CLUSTER_ID}-namenode container"
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
+		docker stop -t 0 ${CLUSTER_ID}-namenode; \
+	else \
+		echo "${CLUSTER_ID}-namenode container does not exist!"; \
+	fi
+
+stop-datanode-container:
+	@echo "stop ${CLUSTER_ID}-datanode$(CUR_DATANODE) container"
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \
+		docker stop -t 0 ${CLUSTER_ID}-datanode$(CUR_DATANODE); \
+	else \
+		echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container does not exist!"; \
+	fi
+
+remove-hdfs:
+	@make -f $(THIS_MAKEFILE_PATH) remove-namenode-container
+	@i=1; \
+	while [ $$i -le $(NDATANODES) ] ; do \
+		make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i remove-datanode-container; \
+		i=$$((i+1)); \
+	done
+	@echo "Remove HDFS Done!"
+
+remove-namenode-container:
+	@echo "make ${CLUSTER_ID}-namenode container"
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
+		docker rm -v ${CLUSTER_ID}-namenode; \
+	else \
+		echo "${CLUSTER_ID}-namenode container does not exist!"; \
+	fi
+
+remove-datanode-container:
+	@echo "make ${CLUSTER_ID}-datanode$(CUR_DATANODE) container"
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \
+		docker rm -v ${CLUSTER_ID}-datanode$(CUR_DATANODE); \
+	else \
+		echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container does not exist!"; \
+	fi
+
+remove-data:
+	@echo remove ${CLUSTER_ID}-data container
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-data" | grep -v CONTAINER`" ]; then \
+		docker rm -v ${CLUSTER_ID}-data; \
+	else \
+		echo "${CLUSTER_ID}-data container does not exist!"; \
+	fi
+
+pull:
+	@echo pull latest images
+	docker pull hawq/hawq-dev:$(OS_VERSION)
+	docker pull hawq/hawq-test:$(OS_VERSION)
+
+clean:
+	@make -f $(THIS_MAKEFILE_PATH) stop 2>&1 >/dev/null || true
+	@make -f $(THIS_MAKEFILE_PATH) remove-hdfs 2>&1 >/dev/null || true
+	@echo "Clean Done!"
+
+distclean:
+	@make -f $(THIS_MAKEFILE_PATH) stop 2>&1 >/dev/null || true
+	@make -f $(THIS_MAKEFILE_PATH) remove-hdfs 2>&1 >/dev/null || true
+	@make -f $(THIS_MAKEFILE_PATH) remove-data 2>&1 >/dev/null || true
+	@if [ ! -z "`docker network ls 2>/dev/null | grep $(NETWORK)`" ]; then \
+		echo remove network $(NETWORK); \
+		docker network rm $(NETWORK) 2>&1 >/dev/null || true; \
+	fi
+	@echo "Distclean Done!"

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/README.md
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/README.md b/contrib/hawq-docker/README.md
new file mode 100644
index 0000000..4adeaaf
--- /dev/null
+++ b/contrib/hawq-docker/README.md
@@ -0,0 +1,97 @@
+# hawq-docker
+
+hawq-docker is based on *wangzw's* repo *hawq-devel-env*. It provides the docker images and scripts that help Apache HAWQ developers set up a build and test environment with docker.
+
+Both CentOS 7 and CentOS 6 are supported.
+Change the variable **OS_VERSION** (:= centos7 or centos6) in the Makefile to switch between CentOS 7 and CentOS 6.
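+
+If you prefer not to edit the Makefile, the variable can also be overridden per invocation (standard make behavior), for example:
+```
+make OS_VERSION=centos6 pull
+```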
+
+The examples below use CentOS 7.
+
+# Install docker
+* Follow the instructions to install docker:
+https://docs.docker.com/
+
+# Setup build and test environment
+* Clone the HAWQ repository
+```
+git clone https://github.com/apache/incubator-hawq.git
+cd incubator-hawq/contrib/hawq-docker
+```
+* Get the docker images
+```
+  make pull (recommended)
+OR
+  make build
+``` 
+(`make pull` pulls the docker images from Docker Hub, while `make build` builds the docker images locally. In general, `make pull` is faster than `make build`.)
+* Set up a 5-node virtual cluster for Apache HAWQ build and test.
+```
+make run
+```
+Now let's have a look at what we created.
+```
+[root@localhost hawq-docker]# docker ps -a
+CONTAINER ID        IMAGE                          COMMAND                CREATED             STATUS              PORTS               NAMES
+382b2b3360d1        hawq/hawq-test:centos7   "entrypoint.sh bash"   2 minutes ago       Up 2 minutes                            centos7-datanode3
+86513c331d45        hawq/hawq-test:centos7   "entrypoint.sh bash"   2 minutes ago       Up 2 minutes                            centos7-datanode2
+c0ab10e46e4a        hawq/hawq-test:centos7   "entrypoint.sh bash"   2 minutes ago       Up 2 minutes                            centos7-datanode1
+e27beea63953        hawq/hawq-test:centos7   "entrypoint.sh bash"   2 minutes ago       Up 2 minutes                            centos7-namenode
+1f986959bd04        hawq/hawq-dev:centos7    "/bin/true"            2 minutes ago       Created                                 centos7-data
+```
+**centos7-data** is a data container; it is mounted at the /data directory in all other containers to provide shared storage for the cluster.
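+
+A quick way to confirm that /data is shared across the containers (the file name here is just an arbitrary example):
+```
+docker exec centos7-namenode touch /data/hello
+docker exec centos7-datanode1 ls /data
+```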
+
+# Build and Test Apache HAWQ
+* Attach to the namenode
+```
+docker exec -it centos7-namenode bash
+```
+* Check that HDFS is working well
+```
+sudo -u hdfs hdfs dfsadmin -report
+```
+* Clone the Apache HAWQ code to the /data directory
+```
+git clone https://github.com/apache/incubator-hawq.git /data/hawq
+```
+* Build Apache HAWQ
+```
+cd /data/hawq
+./configure --prefix=/data/hawq-dev
+make
+make install
+```
+(When you are using CentOS 6, run `scl enable devtoolset-2 bash` before
+configuring HAWQ and run `exit` after installing HAWQ.)
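+For example, the full CentOS 6 sequence would look like this (a sketch of the steps above):
+```
+scl enable devtoolset-2 bash
+cd /data/hawq
+./configure --prefix=/data/hawq-dev
+make
+make install
+exit
+```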
+* Modify the Apache HAWQ configuration
+```
+sed 's|localhost|centos7-namenode|g' -i /data/hawq-dev/etc/hawq-site.xml
+echo 'centos7-datanode1' >  /data/hawq-dev/etc/slaves
+echo 'centos7-datanode2' >>  /data/hawq-dev/etc/slaves
+echo 'centos7-datanode3' >>  /data/hawq-dev/etc/slaves
+```
+* Initialize the Apache HAWQ cluster
+```
+sudo -u hdfs hdfs dfs -chown gpadmin /
+source /data/hawq-dev/greenplum_path.sh
+hawq init cluster
+```
+Now you can connect to the database with the `psql` command.
+```
+[gpadmin@centos7-namenode data]$ psql -d postgres
+psql (8.2.15)
+Type "help" for help.
+
+postgres=# 
+```
+# More commands with this script
+```
+ Usage:
+    To set up a build and test environment:        make run
+    To start all containers:                       make start
+    To stop all containers:                        make stop
+    To remove hdfs containers:                     make clean
+    To remove all containers:                      make distclean
+    To build images locally:                       make build
+    To pull latest images:                         make pull
+```
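+
+To share a host directory as the /data volume, set the `LOCAL` variable when creating the cluster (a sketch; replace /path/on/host with an existing directory on your machine):
+```
+make LOCAL=/path/on/host run
+```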
+

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-dev/Dockerfile
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-dev/Dockerfile b/contrib/hawq-docker/centos6-docker/hawq-dev/Dockerfile
new file mode 100644
index 0000000..9fb8476
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-dev/Dockerfile
@@ -0,0 +1,123 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+FROM centos:6
+
+MAINTAINER Richard Guo <ri...@pivotal.io>
+
+# install all software we need
+RUN yum install -y epel-release && \
+ yum makecache && \
+ yum install -y man passwd sudo tar which git mlocate links make bzip2 \
+ autoconf automake libtool m4 gcc gcc-c++ gdb flex cmake gperf indent \
+ libuuid-devel krb5-devel libgsasl-devel expat-devel libxml2-devel \
+ perl-ExtUtils-Embed pam-devel python-devel snappy-devel \
+ libyaml-devel libevent-devel bzip2-devel openssl-devel \
+ openldap-devel readline-devel net-snmp-devel apr-devel \
+ libesmtp-devel xerces-c-devel python-pip json-c-devel \
+ apache-ivy java-1.7.0-openjdk-devel wget \
+ openssh-clients openssh-server perl-JSON && \
+ yum clean all
+
+# update gcc
+RUN wget -O /etc/yum.repos.d/slc6-devtoolset.repo http://linuxsoft.cern.ch/cern/devtoolset/slc6-devtoolset.repo && \
+ rpm --import http://ftp.scientificlinux.org/linux/scientific/5x/x86_64/RPM-GPG-KEYs/RPM-GPG-KEY-cern && \
+ yum install -y devtoolset-2-gcc devtoolset-2-binutils devtoolset-2-gcc-c++ && \
+ echo "source /opt/rh/devtoolset-2/enable" >> ~/.bashrc && \
+ source ~/.bashrc
+
+# install libcurl 7.45.0
+RUN mkdir -p /tmp/build/ && \
+ cd /tmp/build && curl -L "http://curl.haxx.se/download/curl-7.45.0.tar.bz2" -o curl-7.45.0.tar.bz2 && \
+ tar -xjf curl-7.45.0.tar.bz2 && cd curl-7.45.0 && \
+ ./configure --prefix=/usr && make && make install && \
+ rm -rf /tmp/build && ldconfig
+
+# install maven
+RUN curl -L "http://repos.fedorapeople.org/repos/dchen/apache-maven/epel-apache-maven.repo" -o /etc/yum.repos.d/epel-apache-maven.repo && \
+ yum install -y apache-maven && \
+ yum clean all
+
+# OS requirements
+RUN echo "kernel.sem = 250 512000 100 2048" >> /etc/sysctl.conf
+
+# setup ssh server and keys for root
+RUN ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa && \
+ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys && \
+ chmod 0600 ~/.ssh/authorized_keys
+
+# setup JAVA_HOME for all users
+RUN echo "#!/bin/sh" > /etc/profile.d/java.sh && \
+ echo "export JAVA_HOME=/etc/alternatives/java_sdk" >> /etc/profile.d/java.sh && \
+ chmod a+x /etc/profile.d/java.sh
+
+# install boost 1.59
+RUN mkdir -p /tmp/build && \
+ cd /tmp/build && curl -L "http://downloads.sourceforge.net/project/boost/boost/1.59.0/boost_1_59_0.tar.bz2" -o boost_1_59_0.tar.bz2 && \
+ tar -xjf boost_1_59_0.tar.bz2 && cd boost_1_59_0 && \
+ ./bootstrap.sh && ./b2 --prefix=/usr -q && ./b2 --prefix=/usr -q install && \
+ rm -rf /tmp/build
+
+# install bison 2.5.1
+RUN mkdir -p /tmp/build/ && \
+ cd /tmp/build && curl -L "ftp://ftp.gnu.org/gnu/bison/bison-2.5.1.tar.gz" -o bison-2.5.1.tar.gz && \
+ tar -xzf bison-2.5.1.tar.gz && cd bison-2.5.1 && \
+ ./configure --prefix=/usr && make && make install && \
+ rm -rf /tmp/build
+
+# install thrift 0.9.1
+RUN mkdir -p /tmp/build && \
+ cd /tmp/build && curl -L "https://archive.apache.org/dist/thrift/0.9.1/thrift-0.9.1.tar.gz" -o thrift-0.9.1.tar.gz && \
+ tar -xf thrift-0.9.1.tar.gz && cd thrift-0.9.1 && \
+ ./configure --prefix=/usr --without-tests && \
+ make && make install && \
+ rm -rf /tmp/build
+
+# install protobuf 2.5.0
+RUN mkdir -p /tmp/build/ && \
+ cd /tmp/build && curl -L "https://github.com/google/protobuf/releases/download/v2.5.0/protobuf-2.5.0.tar.bz2" -o protobuf-2.5.0.tar.bz2 && \
+ tar -xjf protobuf-2.5.0.tar.bz2 && cd protobuf-2.5.0 && \
+ ./configure --prefix=/usr && make && make install && ldconfig && \
+ rm -rf /tmp/build
+
+# install python module 
+RUN pip --retries=50 --timeout=300 install pycrypto
+
+# create user gpadmin since HAWQ cannot run under root
+RUN groupadd -g 1000 gpadmin && \
+ useradd -u 1000 -g 1000 gpadmin && \
+ echo "gpadmin  ALL=(ALL)       NOPASSWD: ALL" > /etc/sudoers.d/gpadmin
+
+# sudo should not require tty
+RUN sed -i -e 's|Defaults    requiretty|#Defaults    requiretty|' /etc/sudoers
+
+RUN echo "#!/bin/bash" > /etc/profile.d/user.sh && \
+ echo "export USER=\`whoami\`" >> /etc/profile.d/user.sh && \
+ chmod a+x /etc/profile.d/user.sh
+
+ENV BASEDIR /data
+RUN mkdir -p /data && chmod 777 /data
+
+USER gpadmin
+
+# setup ssh client keys for gpadmin
+RUN ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa && \
+ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys && \
+ chmod 0600 ~/.ssh/authorized_keys
+
+WORKDIR /data
+

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/Dockerfile
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/Dockerfile b/contrib/hawq-docker/centos6-docker/hawq-test/Dockerfile
new file mode 100644
index 0000000..94a04fe
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/Dockerfile
@@ -0,0 +1,40 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+FROM hawq/hawq-dev:centos6
+
+MAINTAINER Richard Guo <ri...@pivotal.io>
+
+USER root
+
+# install HDP 2.5.0
+RUN curl -L "http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0/hdp.repo" -o /etc/yum.repos.d/hdp.repo && \
+ yum install -y hadoop hadoop-hdfs hadoop-libhdfs hadoop-yarn hadoop-mapreduce hadoop-client hdp-select && \
+ yum clean all
+
+RUN ln -s /usr/hdp/current/hadoop-hdfs-namenode/../hadoop/sbin/hadoop-daemon.sh /usr/bin/hadoop-daemon.sh
+
+COPY conf/* /etc/hadoop/conf/
+
+COPY entrypoint.sh /usr/bin/entrypoint.sh
+COPY start-hdfs.sh /usr/bin/start-hdfs.sh
+
+USER gpadmin
+
+ENTRYPOINT ["entrypoint.sh"]
+CMD ["bash"]
+

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/capacity-scheduler.xml b/contrib/hawq-docker/centos6-docker/hawq-test/conf/capacity-scheduler.xml
new file mode 100644
index 0000000..30f4eb9
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/capacity-scheduler.xml
@@ -0,0 +1,134 @@
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<configuration>
+
+  <property>
+    <name>yarn.scheduler.capacity.maximum-applications</name>
+    <value>10000</value>
+    <description>
+      Maximum number of applications that can be pending and running.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
+    <value>0.1</value>
+    <description>
+      Maximum percent of resources in the cluster which can be used to run 
+      application masters i.e. controls number of concurrent running
+      applications.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.resource-calculator</name>
+    <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
+    <description>
+      The ResourceCalculator implementation to be used to compare 
+      Resources in the scheduler.
+      The default i.e. DefaultResourceCalculator only uses Memory while
+      DominantResourceCalculator uses dominant-resource to compare 
+      multi-dimensional resources such as Memory, CPU etc.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.queues</name>
+    <value>default</value>
+    <description>
+      The queues at this level (root is the root queue).
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.capacity</name>
+    <value>100</value>
+    <description>Default queue target capacity.</description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
+    <value>1</value>
+    <description>
+      Default queue user limit a percentage from 0.0 to 1.0.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
+    <value>100</value>
+    <description>
+      The maximum capacity of the default queue. 
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.state</name>
+    <value>RUNNING</value>
+    <description>
+      The state of the default queue. State can be one of RUNNING or STOPPED.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
+    <value>*</value>
+    <description>
+      The ACL of who can submit jobs to the default queue.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
+    <value>*</value>
+    <description>
+      The ACL of who can administer jobs on the default queue.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.node-locality-delay</name>
+    <value>40</value>
+    <description>
+      Number of missed scheduling opportunities after which the CapacityScheduler 
+      attempts to schedule rack-local containers. 
+      Typically this should be set to the number of nodes in the cluster. By default
+      it is set to approximately the number of nodes in one rack, which is 40.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.queue-mappings</name>
+    <value></value>
+    <description>
+      A list of mappings that will be used to assign jobs to queues
+      The syntax for this list is [u|g]:[name]:[queue_name][,next mapping]*
+      Typically this list will be used to map users to queues,
+      for example, u:%user:%user maps all users to queues with the same name
+      as the user.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.queue-mappings-override.enable</name>
+    <value>false</value>
+    <description>
+      If a queue mapping is present, will it override the value specified
+      by the user? This can be used by administrators to place jobs in queues
+      that are different than the one specified by the user.
+      The default is false.
+    </description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/configuration.xsl
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/configuration.xsl b/contrib/hawq-docker/centos6-docker/hawq-test/conf/configuration.xsl
new file mode 100644
index 0000000..d50d80b
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/configuration.xsl
@@ -0,0 +1,40 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:output method="html"/>
+<xsl:template match="configuration">
+<html>
+<body>
+<table border="1">
+<tr>
+ <td>name</td>
+ <td>value</td>
+ <td>description</td>
+</tr>
+<xsl:for-each select="property">
+<tr>
+  <td><a name="{name}"><xsl:value-of select="name"/></a></td>
+  <td><xsl:value-of select="value"/></td>
+  <td><xsl:value-of select="description"/></td>
+</tr>
+</xsl:for-each>
+</table>
+</body>
+</html>
+</xsl:template>
+</xsl:stylesheet>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/container-executor.cfg
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/container-executor.cfg b/contrib/hawq-docker/centos6-docker/hawq-test/conf/container-executor.cfg
new file mode 100644
index 0000000..d68cee8
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/container-executor.cfg
@@ -0,0 +1,4 @@
+yarn.nodemanager.linux-container-executor.group=#configured value of yarn.nodemanager.linux-container-executor.group
+banned.users=#comma separated list of users who can not run applications
+min.user.id=1000#Prevent other super-users
+allowed.system.users=##comma separated list of system users who CAN run applications

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/core-site.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/core-site.xml b/contrib/hawq-docker/centos6-docker/hawq-test/conf/core-site.xml
new file mode 100644
index 0000000..afc37fc
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/core-site.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+	<property>
+		<name>fs.defaultFS</name>
+		<value>hdfs://${hdfs.namenode}:8020</value>
+	</property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.cmd
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.cmd b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.cmd
new file mode 100644
index 0000000..bb40ec9
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.cmd
@@ -0,0 +1,92 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements.  See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License.  You may obtain a copy of the License at
+@rem
+@rem     http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem Set Hadoop-specific environment variables here.
+
+@rem The only required environment variable is JAVA_HOME.  All others are
+@rem optional.  When running a distributed configuration it is best to
+@rem set JAVA_HOME in this file, so that it is correctly defined on
+@rem remote nodes.
+
+@rem The java implementation to use.  Required.
+set JAVA_HOME=%JAVA_HOME%
+
+@rem The jsvc implementation to use. Jsvc is required to run secure datanodes.
+@rem set JSVC_HOME=%JSVC_HOME%
+
+@rem set HADOOP_CONF_DIR=
+
+@rem Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
+if exist %HADOOP_HOME%\contrib\capacity-scheduler (
+  if not defined HADOOP_CLASSPATH (
+    set HADOOP_CLASSPATH=%HADOOP_HOME%\contrib\capacity-scheduler\*.jar
+  ) else (
+    set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%HADOOP_HOME%\contrib\capacity-scheduler\*.jar
+  )
+)
+
+@rem If TEZ_CLASSPATH is defined in the env, that means that TEZ is enabled
+@rem append it to the HADOOP_CLASSPATH
+
+if defined TEZ_CLASSPATH (
+  if not defined HADOOP_CLASSPATH (
+    set HADOOP_CLASSPATH=%TEZ_CLASSPATH%
+  ) else (
+    set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%TEZ_CLASSPATH%
+  )
+)
+
+@rem The maximum amount of heap to use, in MB. Default is 1000.
+@rem set HADOOP_HEAPSIZE=
+@rem set HADOOP_NAMENODE_INIT_HEAPSIZE=""
+
+@rem Extra Java runtime options.  Empty by default.
+@rem set HADOOP_OPTS=%HADOOP_OPTS% -Djava.net.preferIPv4Stack=true
+
+@rem Command specific options appended to HADOOP_OPTS when specified
+if not defined HADOOP_SECURITY_LOGGER (
+  set HADOOP_SECURITY_LOGGER=INFO,RFAS
+)
+if not defined HDFS_AUDIT_LOGGER (
+  set HDFS_AUDIT_LOGGER=INFO,NullAppender
+)
+
+set HADOOP_NAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_NAMENODE_OPTS%
+set HADOOP_DATANODE_OPTS=-Dhadoop.security.logger=ERROR,RFAS %HADOOP_DATANODE_OPTS%
+set HADOOP_SECONDARYNAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_SECONDARYNAMENODE_OPTS%
+
+@rem The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+set HADOOP_CLIENT_OPTS=-Xmx512m %HADOOP_CLIENT_OPTS%
+@rem set HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData %HADOOP_JAVA_PLATFORM_OPTS%"
+
+@rem On secure datanodes, user to run the datanode as after dropping privileges
+set HADOOP_SECURE_DN_USER=%HADOOP_SECURE_DN_USER%
+
+@rem Where log files are stored.  %HADOOP_HOME%/logs by default.
+@rem set HADOOP_LOG_DIR=%HADOOP_LOG_DIR%\%USERNAME%
+
+@rem Where log files are stored in the secure data environment.
+set HADOOP_SECURE_DN_LOG_DIR=%HADOOP_LOG_DIR%\%HADOOP_HDFS_USER%
+
+@rem The directory where pid files are stored. /tmp by default.
+@rem NOTE: this should be set to a directory that can only be written to by 
+@rem       the user that will run the hadoop daemons.  Otherwise there is the
+@rem       potential for a symlink attack.
+set HADOOP_PID_DIR=%HADOOP_PID_DIR%
+set HADOOP_SECURE_DN_PID_DIR=%HADOOP_PID_DIR%
+
+@rem A string representing this instance of hadoop. %USERNAME% by default.
+set HADOOP_IDENT_STRING=%USERNAME%

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.sh b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.sh
new file mode 100644
index 0000000..95511ed
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.sh
@@ -0,0 +1,110 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.
+export JAVA_HOME=/etc/alternatives/java_sdk
+
+# The jsvc implementation to use. Jsvc is required to run secure datanodes
+# that bind to privileged ports to provide authentication of data transfer
+# protocol.  Jsvc is not required if SASL is configured for authentication of
+# data transfer protocol using non-privileged ports.
+#export JSVC_HOME=${JSVC_HOME}
+
+#export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
+
+# Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
+#for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
+#  if [ "$HADOOP_CLASSPATH" ]; then
+#    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
+#  else
+#    export HADOOP_CLASSPATH=$f
+#  fi
+#done
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+#export HADOOP_HEAPSIZE=
+#export HADOOP_NAMENODE_INIT_HEAPSIZE=""
+
+# Set up environment variables for the docker image
+if [ -f /etc/profile.d/hadoop.sh ]; then
+  . /etc/profile.d/hadoop.sh
+fi
+
+if [ -z "${NAMENODE}" ]; then
+  echo "environment variable NAMENODE is not set!"
+  exit 1
+fi
+
+# Extra Java runtime options.  Empty by default.
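+# The hdfs.namenode system property set here is expanded in core-site.xml
+# (fs.defaultFS = hdfs://${hdfs.namenode}:8020), so HDFS picks up the
+# namenode host from the NAMENODE environment variable.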
+export HADOOP_OPTS="-Dhdfs.namenode=${NAMENODE}"
+#export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
+
+# Command specific options appended to HADOOP_OPTS when specified
+#export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
+#export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
+
+#export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
+
+#export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
+#export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+#export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
+#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
+
+# On secure datanodes, user to run the datanode as after dropping privileges.
+# This **MUST** be uncommented to enable secure HDFS if using privileged ports
+# to provide authentication of data transfer protocol.  This **MUST NOT** be
+# defined if SASL is configured for authentication of data transfer protocol
+# using non-privileged ports.
+#export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR=/var/log/hadoop
+export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
+
+# Where log files are stored in the secure data environment.
+#export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
+
+###
+# HDFS Mover specific parameters
+###
+# Specify the JVM options to be used when starting the HDFS Mover.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HADOOP_MOVER_OPTS=""
+
+###
+# Advanced Users Only!
+###
+
+# The directory where pid files are stored. /tmp by default.
+# NOTE: this should be set to a directory that can only be written to by
+#       the user that will run the hadoop daemons.  Otherwise there is the
+#       potential for a symlink attack.
+#export HADOOP_PID_DIR=${HADOOP_PID_DIR}
+#export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
+
+# A string representing this instance of hadoop. $USER by default.
+#export HADOOP_IDENT_STRING=$USER

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-metrics.properties
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-metrics.properties b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-metrics.properties
new file mode 100644
index 0000000..c1b2eb7
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-metrics.properties
@@ -0,0 +1,75 @@
+# Configuration of the "dfs" context for null
+dfs.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "dfs" context for file
+#dfs.class=org.apache.hadoop.metrics.file.FileContext
+#dfs.period=10
+#dfs.fileName=/tmp/dfsmetrics.log
+
+# Configuration of the "dfs" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# dfs.period=10
+# dfs.servers=localhost:8649
+
+
+# Configuration of the "mapred" context for null
+mapred.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "mapred" context for file
+#mapred.class=org.apache.hadoop.metrics.file.FileContext
+#mapred.period=10
+#mapred.fileName=/tmp/mrmetrics.log
+
+# Configuration of the "mapred" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# mapred.period=10
+# mapred.servers=localhost:8649
+
+
+# Configuration of the "jvm" context for null
+#jvm.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "jvm" context for file
+#jvm.class=org.apache.hadoop.metrics.file.FileContext
+#jvm.period=10
+#jvm.fileName=/tmp/jvmmetrics.log
+
+# Configuration of the "jvm" context for ganglia
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# jvm.period=10
+# jvm.servers=localhost:8649
+
+# Configuration of the "rpc" context for null
+rpc.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "rpc" context for file
+#rpc.class=org.apache.hadoop.metrics.file.FileContext
+#rpc.period=10
+#rpc.fileName=/tmp/rpcmetrics.log
+
+# Configuration of the "rpc" context for ganglia
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# rpc.period=10
+# rpc.servers=localhost:8649
+
+
+# Configuration of the "ugi" context for null
+ugi.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "ugi" context for file
+#ugi.class=org.apache.hadoop.metrics.file.FileContext
+#ugi.period=10
+#ugi.fileName=/tmp/ugimetrics.log
+
+# Configuration of the "ugi" context for ganglia
+# ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# ugi.period=10
+# ugi.servers=localhost:8649
+

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-metrics2.properties
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-metrics2.properties b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-metrics2.properties
new file mode 100644
index 0000000..0c09228
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-metrics2.properties
@@ -0,0 +1,68 @@
+# syntax: [prefix].[source|sink].[instance].[options]
+# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
+
+*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
+# default sampling period, in seconds
+*.period=10
+
+# The namenode-metrics.out will contain metrics from all context
+#namenode.sink.file.filename=namenode-metrics.out
+# Specifying a special sampling period for namenode:
+#namenode.sink.*.period=8
+
+#datanode.sink.file.filename=datanode-metrics.out
+
+#resourcemanager.sink.file.filename=resourcemanager-metrics.out
+
+#nodemanager.sink.file.filename=nodemanager-metrics.out
+
+#mrappmaster.sink.file.filename=mrappmaster-metrics.out
+
+#jobhistoryserver.sink.file.filename=jobhistoryserver-metrics.out
+
+# the following example split metrics of different
+# context to different sinks (in this case files)
+#nodemanager.sink.file_jvm.class=org.apache.hadoop.metrics2.sink.FileSink
+#nodemanager.sink.file_jvm.context=jvm
+#nodemanager.sink.file_jvm.filename=nodemanager-jvm-metrics.out
+#nodemanager.sink.file_mapred.class=org.apache.hadoop.metrics2.sink.FileSink
+#nodemanager.sink.file_mapred.context=mapred
+#nodemanager.sink.file_mapred.filename=nodemanager-mapred-metrics.out
+
+#
+# Below are for sending metrics to Ganglia
+#
+# for Ganglia 3.0 support
+# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink30
+#
+# for Ganglia 3.1 support
+# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+
+# *.sink.ganglia.period=10
+
+# default for supportsparse is false
+# *.sink.ganglia.supportsparse=true
+
+#*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+#*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Tag values to use for the ganglia prefix. If not defined no tags are used.
+# If '*' all tags are used. If specifying multiple tags separate them with
+# commas. Note that the last segment of the property name is the context name.
+#
+#*.sink.ganglia.tagsForPrefix.jvm=ProcesName
+#*.sink.ganglia.tagsForPrefix.dfs=
+#*.sink.ganglia.tagsForPrefix.rpc=
+#*.sink.ganglia.tagsForPrefix.mapred=
+
+#namenode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#datanode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#resourcemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#nodemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#mrappmaster.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#jobhistoryserver.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-policy.xml b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-policy.xml
new file mode 100644
index 0000000..2bf5c02
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-policy.xml
@@ -0,0 +1,226 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ 
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientProtocol, which is used by user code
+    via the DistributedFileSystem.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.client.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+    for block recovery.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for DatanodeProtocol, which is used by datanodes to
+    communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.inter.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
+    for updating generation timestamp.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.namenode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for NamenodeProtocol, the protocol used by the secondary
+    namenode to communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+ <property>
+    <name>security.admin.operations.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for AdminOperationsProtocol. Used for admin commands.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.refresh.user.mappings.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
+    users mappings. The ACL is a comma-separated list of user and
+    group names. The user and group list is separated by a blank. For
+    e.g. "alice,bob users,wheel".  A special value of "*" means all
+    users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.refresh.policy.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
+    dfsadmin and mradmin commands to refresh the security policy in-effect.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.ha.service.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HAService protocol used by HAAdmin to manage the
+      active and stand-by states of namenode.</description>
+  </property>
+
+  <property>
+    <name>security.zkfc.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for access to the ZK Failover Controller
+    </description>
+  </property>
+
+  <property>
+    <name>security.qjournal.service.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for QJournalProtocol, used by the NN to communicate with
+    JNs when using the QuorumJournalManager for edit logs.</description>
+  </property>
+
+  <property>
+    <name>security.mrhs.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HSClientProtocol, used by job clients to
+    communicate with the MR History Server to query job status etc.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <!-- YARN Protocols -->
+
+  <property>
+    <name>security.resourcetracker.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ResourceTrackerProtocol, used by the
+    ResourceManager and NodeManager to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.resourcemanager-administration.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ResourceManagerAdministrationProtocol, for admin commands. 
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.applicationclient.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ApplicationClientProtocol, used by the ResourceManager 
+    and applications submission clients to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.applicationmaster.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ApplicationMasterProtocol, used by the ResourceManager 
+    and ApplicationMasters to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.containermanagement.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ContainerManagementProtocol protocol, used by the NodeManager 
+    and ApplicationMasters to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.resourcelocalizer.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ResourceLocalizer protocol, used by the NodeManager 
+    and ResourceLocalizer to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.task.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
+    tasks to communicate with the parent tasktracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for MRClientProtocol, used by job clients to
+    communicate with the MR ApplicationMaster to query job status, etc.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.applicationhistory.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ApplicationHistoryProtocol, used by the timeline
+    server and the generic history service client to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+</configuration>
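
Every ACL above is left at "*", so any user may call any of these protocols; that is reasonable for a throwaway dev/test container but not for anything shared. As a hedged illustration only (the user and group names below are invented, not part of this commit), tightening one of these properties and reloading it on the running daemons would look roughly like:

    # Illustrative: restrict admin operations to user "gpadmin" and group "hadoop"
    # by editing hadoop-policy.xml ...
    #   <name>security.admin.operations.protocol.acl</name>
    #   <value>gpadmin hadoop</value>
    # ... then reload the policy without restarting the daemons:
    hdfs dfsadmin -refreshServiceAcl
    yarn rmadmin -refreshServiceAcl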

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/hdfs-site.xml b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hdfs-site.xml
new file mode 100644
index 0000000..f565658
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hdfs-site.xml
@@ -0,0 +1,100 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+	<property>
+		<name>dfs.name.dir</name>
+		<value>/tmp/hdfs/name</value>
+		<final>true</final>
+	</property>
+
+	<property>
+		<name>dfs.data.dir</name>
+		<value>/tmp/hdfs/data</value>
+		<final>true</final>
+	</property>
+
+	<property>
+		<name>dfs.permissions</name>
+		<value>true</value>
+	</property>
+
+	<property>
+		<name>dfs.support.append</name>
+		<value>true</value>
+	</property>
+
+	<property>
+		<name>dfs.block.local-path-access.user</name>
+		<value>${user.name}</value>
+	</property>
+
+	<property>
+		<name>dfs.replication</name>
+		<value>3</value>
+	</property>
+
+	<property>
+		<name>dfs.datanode.socket.write.timeout</name>
+		<value>0</value>
+		<description>
+			The write timeout used for sockets to and from datanodes. It is 8 minutes
+			by default. Setting this to 0 effectively disables the write timeout.
+		</description>
+	</property>
+
+	<property>
+		<name>dfs.webhdfs.enabled</name>
+		<value>true</value>
+	</property>
+
+	<property>
+		<name>dfs.allow.truncate</name>
+		<value>true</value>
+	</property>
+
+	<property>
+		<name>dfs.namenode.fs-limits.min-block-size</name>
+		<value>1024</value>
+	</property>
+
+	<property>
+		<name>dfs.client.read.shortcircuit</name>
+		<value>true</value>
+	</property>
+
+	<property>
+		<name>dfs.domain.socket.path</name>
+		<value>/var/lib/hadoop-hdfs/dn_socket</value>
+	</property>
+
+	<property>
+		<name>dfs.block.access.token.enable</name>
+		<value>true</value>
+		<description>
+			If "true", access tokens are used as capabilities for accessing
+			datanodes.
+			If "false", no access tokens are checked on accessing datanodes.
+		</description>
+	</property>
+
+	<property>
+		<name>dfs.namenode.datanode.registration.ip-hostname-check</name>
+		<value>false</value>
+	</property>
+</configuration>
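
This hdfs-site.xml keeps NameNode and DataNode data under /tmp/hdfs, enables short-circuit reads over the domain socket /var/lib/hadoop-hdfs/dn_socket, enables truncate and WebHDFS, and turns off the DataNode IP/hostname registration check, which is commonly needed when DataNodes sit behind Docker networking. A rough sketch of the bring-up these settings imply, assuming the standard Hadoop 2.x scripts are on PATH (the image's own start-hdfs.sh may do this differently):

    # Sketch only; the container's start-hdfs.sh may differ.
    mkdir -p /var/lib/hadoop-hdfs /tmp/hdfs/name /tmp/hdfs/data   # dirs referenced above
    hdfs namenode -format -force        # one-time format of the name directory
    hadoop-daemon.sh start namenode
    hadoop-daemon.sh start datanode
    hdfs dfsadmin -report               # quick sanity check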

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-acls.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-acls.xml b/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-acls.xml
new file mode 100644
index 0000000..cba69f4
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-acls.xml
@@ -0,0 +1,135 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+
+  <!-- This file is hot-reloaded when it changes -->
+
+  <!-- KMS ACLs -->
+
+  <property>
+    <name>hadoop.kms.acl.CREATE</name>
+    <value>*</value>
+    <description>
+      ACL for create-key operations.
+      If the user is not in the GET ACL, the key material is not returned
+      as part of the response.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.DELETE</name>
+    <value>*</value>
+    <description>
+      ACL for delete-key operations.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.ROLLOVER</name>
+    <value>*</value>
+    <description>
+      ACL for rollover-key operations.
+      If the user is not in the GET ACL, the key material is not returned
+      as part of the response.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.GET</name>
+    <value>*</value>
+    <description>
+      ACL for get-key-version and get-current-key operations.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.GET_KEYS</name>
+    <value>*</value>
+    <description>
+      ACL for get-keys operations.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.GET_METADATA</name>
+    <value>*</value>
+    <description>
+      ACL for get-key-metadata and get-keys-metadata operations.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.SET_KEY_MATERIAL</name>
+    <value>*</value>
+    <description>
+      Complementary ACL for CREATE and ROLLOVER operations to allow the client
+      to provide the key material when creating or rolling a key.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.GENERATE_EEK</name>
+    <value>*</value>
+    <description>
+      ACL for generateEncryptedKey CryptoExtension operations.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.DECRYPT_EEK</name>
+    <value>*</value>
+    <description>
+      ACL for decryptEncryptedKey CryptoExtension operations.
+    </description>
+  </property>
+
+  <property>
+    <name>default.key.acl.MANAGEMENT</name>
+    <value>*</value>
+    <description>
+      Default ACL for MANAGEMENT operations for all key ACLs that are not
+      explicitly defined.
+    </description>
+  </property>
+
+  <property>
+    <name>default.key.acl.GENERATE_EEK</name>
+    <value>*</value>
+    <description>
+      Default ACL for GENERATE_EEK operations for all key ACLs that are not
+      explicitly defined.
+    </description>
+  </property>
+
+  <property>
+    <name>default.key.acl.DECRYPT_EEK</name>
+    <value>*</value>
+    <description>
+      Default ACL for DECRYPT_EEK operations for all key ACLs that are not
+      explicitly defined.
+    </description>
+  </property>
+
+  <property>
+    <name>default.key.acl.READ</name>
+    <value>*</value>
+    <description>
+      Default ACL for READ operations for all key ACLs that are not
+      explicitly defined.
+    </description>
+  </property>
+
+
+</configuration>
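
Since every KMS ACL and default key ACL above is "*", any authenticated user may create, roll, fetch metadata for, and decrypt keys; again this only makes sense inside a disposable test image. A quick, illustrative smoke test against such a KMS (the key name and URI are examples; 16000 is the default KMS HTTP port):

    # Illustrative: exercise the CREATE, GET_KEYS, ROLLOVER and DELETE ACLs.
    hadoop key create testkey -provider kms://http@localhost:16000/kms
    hadoop key list           -provider kms://http@localhost:16000/kms
    hadoop key roll testkey   -provider kms://http@localhost:16000/kms
    hadoop key delete testkey -provider kms://http@localhost:16000/kms -f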

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-env.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-env.sh b/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-env.sh
new file mode 100644
index 0000000..44dfe6a
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-env.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License. See accompanying LICENSE file.
+#
+
+# Set kms specific environment variables here.
+
+# Settings for the Embedded Tomcat that runs KMS
+# Java System properties for KMS should be specified in this variable
+#
+# export CATALINA_OPTS=
+
+# KMS logs directory
+#
+# export KMS_LOG=${KMS_HOME}/logs
+
+# KMS temporary directory
+#
+# export KMS_TEMP=${KMS_HOME}/temp
+
+# The HTTP port used by KMS
+#
+# export KMS_HTTP_PORT=16000
+
+# The Admin port used by KMS
+#
+# export KMS_ADMIN_PORT=`expr ${KMS_HTTP_PORT} + 1`
+
+# The maximum number of Tomcat handler threads
+#
+# export KMS_MAX_THREADS=1000
+
+# The location of the SSL keystore if using SSL
+#
+# export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore
+
+# The password of the SSL keystore if using SSL
+#
+# export KMS_SSL_KEYSTORE_PASS=password
+
+# The full path to any native libraries that need to be loaded
+# (e.g. the location of natively compiled Tomcat Apache Portable
+# Runtime (APR) libraries)
+#
+# export JAVA_LIBRARY_PATH=${HOME}/lib/native
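As shipped, every setting in kms-env.sh is commented out, so the KMS runs with its defaults (HTTP port 16000, admin port 16001, logs and temp under KMS_HOME). If a test setup ever needs different values, a minimal override might look like the following; the port and path are examples, not something this commit configures:

    # Example overrides; values are illustrative only.
    export KMS_HTTP_PORT=16100
    export KMS_ADMIN_PORT=`expr ${KMS_HTTP_PORT} + 1`
    export KMS_LOG=/var/log/hadoop-kms
    kms.sh start      # the Hadoop sbin script that launches the embedded Tomcat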

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-log4j.properties
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-log4j.properties b/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-log4j.properties
new file mode 100644
index 0000000..8e6d909
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-log4j.properties
@@ -0,0 +1,38 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. See accompanying LICENSE file.
+#
+
+# If the Java System property 'kms.log.dir' is not defined at KMS start up time
+# Setup sets its value to '${kms.home}/logs'
+
+log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.kms.DatePattern='.'yyyy-MM-dd
+log4j.appender.kms.File=${kms.log.dir}/kms.log
+log4j.appender.kms.Append=true
+log4j.appender.kms.layout=org.apache.log4j.PatternLayout
+log4j.appender.kms.layout.ConversionPattern=%d{ISO8601} %-5p %c{1} - %m%n
+
+log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.kms-audit.DatePattern='.'yyyy-MM-dd
+log4j.appender.kms-audit.File=${kms.log.dir}/kms-audit.log
+log4j.appender.kms-audit.Append=true
+log4j.appender.kms-audit.layout=org.apache.log4j.PatternLayout
+log4j.appender.kms-audit.layout.ConversionPattern=%d{ISO8601} %m%n
+
+log4j.logger.kms-audit=INFO, kms-audit
+log4j.additivity.kms-audit=false
+
+log4j.rootLogger=ALL, kms
+log4j.logger.org.apache.hadoop.conf=ERROR
+log4j.logger.org.apache.hadoop=INFO
+log4j.logger.com.sun.jersey.server.wadl.generators.WadlGeneratorJAXBGrammarGenerator=OFF
\ No newline at end of file
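
With this log4j setup the root logger writes to ${kms.log.dir}/kms.log and audit events go to kms-audit.log; kms.log.dir is a Java system property that defaults to ${kms.home}/logs. On the assumption that the stock kms.sh wires the KMS_LOG variable from kms-env.sh through to that property (the path below is only an example), redirecting and watching the logs would look roughly like:

    # Assumption: kms.sh maps KMS_LOG onto the kms.log.dir system property.
    export KMS_LOG=/var/log/hadoop-kms
    kms.sh start
    tail -f /var/log/hadoop-kms/kms.log /var/log/hadoop-kms/kms-audit.log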

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-site.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-site.xml b/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-site.xml
new file mode 100644
index 0000000..a810ca4
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-site.xml
@@ -0,0 +1,173 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+
+  <!-- KMS Backend KeyProvider -->
+
+  <property>
+    <name>hadoop.kms.key.provider.uri</name>
+    <value>jceks://file@/${user.home}/kms.keystore</value>
+    <description>
+      URI of the backing KeyProvider for the KMS.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.security.keystore.JavaKeyStoreProvider.password</name>
+    <value>none</value>
+    <description>
+      If using the JavaKeyStoreProvider, the password for the keystore file.
+    </description>
+  </property>
+
+  <!-- KMS Cache -->
+
+  <property>
+    <name>hadoop.kms.cache.enable</name>
+    <value>true</value>
+    <description>
+      Whether the KMS will act as a cache for the backing KeyProvider.
+      When the cache is enabled, operations like getKeyVersion, getMetadata,
+      and getCurrentKey will sometimes return cached data without consulting
+      the backing KeyProvider. Cached values are flushed when keys are deleted
+      or modified.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.cache.timeout.ms</name>
+    <value>600000</value>
+    <description>
+      Expiry time for the KMS key version and key metadata cache, in
+      milliseconds. This affects getKeyVersion and getMetadata.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.current.key.cache.timeout.ms</name>
+    <value>30000</value>
+    <description>
+      Expiry time for the KMS current key cache, in milliseconds. This
+      affects getCurrentKey operations.
+    </description>
+  </property>
+
+  <!-- KMS Audit -->
+
+  <property>
+    <name>hadoop.kms.audit.aggregation.window.ms</name>
+    <value>10000</value>
+    <description>
+      Duplicate audit log events within the aggregation window (specified in
+      ms) are quashed to reduce log traffic. A single message for aggregated
+      events is printed at the end of the window, along with a count of the
+      number of aggregated events.
+    </description>
+  </property>
+
+  <!-- KMS Security -->
+
+  <property>
+    <name>hadoop.kms.authentication.type</name>
+    <value>simple</value>
+    <description>
+      Authentication type for the KMS. Can be either &quot;simple&quot;
+      or &quot;kerberos&quot;.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.kerberos.keytab</name>
+    <value>${user.home}/kms.keytab</value>
+    <description>
+      Path to the keytab with credentials for the configured Kerberos principal.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.kerberos.principal</name>
+    <value>HTTP/localhost</value>
+    <description>
+      The Kerberos principal to use for the HTTP endpoint.
+      The principal must start with 'HTTP/' as per the Kerberos HTTP SPNEGO specification.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.kerberos.name.rules</name>
+    <value>DEFAULT</value>
+    <description>
+      Rules used to resolve Kerberos principal names.
+    </description>
+  </property>
+
+  <!-- Authentication cookie signature source -->
+
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider</name>
+    <value>random</value>
+    <description>
+      Indicates how the secret to sign the authentication cookies will be
+      stored. Options are 'random' (default), 'string' and 'zookeeper'.
+      If using a setup with multiple KMS instances, 'zookeeper' should be used.
+    </description>
+  </property>
+
+  <!-- Configuration for 'zookeeper' authentication cookie signature source -->
+
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.path</name>
+    <value>/hadoop-kms/hadoop-auth-signature-secret</value>
+    <description>
+      The Zookeeper ZNode path where the KMS instances will store and retrieve
+      the secret from.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string</name>
+    <value>#HOSTNAME#:#PORT#,...</value>
+    <description>
+      The Zookeeper connection string: a comma-separated list of
+      hostname:port pairs.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.auth.type</name>
+    <value>kerberos</value>
+    <description>
+      The Zookeeper authentication type, 'none' or 'sasl' (Kerberos).
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.keytab</name>
+    <value>/etc/hadoop/conf/kms.keytab</value>
+    <description>
+      The absolute path for the Kerberos keytab with the credentials to
+      connect to Zookeeper.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.principal</name>
+    <value>kms/#HOSTNAME#</value>
+    <description>
+      The Kerberos service principal used to connect to Zookeeper.
+    </description>
+  </property>
+
+</configuration>
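
Taken together, this kms-site.xml describes an unsecured single-node KMS: simple authentication, a file-based JCEKS keystore under ${user.home}, and a random in-memory signer secret, which is fine for the test container but nothing more. Note that the hdfs-site.xml earlier in this commit does not point HDFS at this KMS; for transparent encryption the client and NameNode would also need dfs.encryption.key.provider.uri set to the KMS URI. An illustrative end-to-end check, with made-up key and path names:

    # Illustrative only; requires dfs.encryption.key.provider.uri to reference the KMS.
    hadoop key create hawqkey -provider kms://http@localhost:16000/kms
    hdfs dfs -mkdir /secure_zone
    hdfs crypto -createZone -keyName hawqkey -path /secure_zone
    hdfs crypto -listZones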