Posted to commits@hawq.apache.org by hu...@apache.org on 2017/01/17 03:03:50 UTC

incubator-hawq git commit: HAWQ-1248. Merge Dockerfiles for HAWQ Dev into HAWQ code base

Repository: incubator-hawq
Updated Branches:
  refs/heads/master ec7b4d9e9 -> 368dbc9e6


HAWQ-1248. Merge Dockerfiles for HAWQ Dev into HAWQ code base


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/368dbc9e
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/368dbc9e
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/368dbc9e

Branch: refs/heads/master
Commit: 368dbc9e64a2e62061ea47a0b9c7b15589dad457
Parents: ec7b4d9
Author: Richard Guo <gu...@gmail.com>
Authored: Tue Jan 3 17:12:59 2017 +0800
Committer: Ruilong Huo <rh...@pivotal.io>
Committed: Tue Jan 17 10:57:02 2017 +0800

----------------------------------------------------------------------
 contrib/hawq-docker/Makefile                    | 222 +++++++++++++++++++
 contrib/hawq-docker/README.md                   |  97 ++++++++
 .../centos6-docker/hawq-dev/Dockerfile          | 123 ++++++++++
 .../centos6-docker/hawq-test/Dockerfile         |  40 ++++
 .../centos6-docker/hawq-test/conf/core-site.xml |  24 ++
 .../centos6-docker/hawq-test/conf/hadoop-env.sh | 110 +++++++++
 .../centos6-docker/hawq-test/entrypoint.sh      |  34 +++
 .../centos6-docker/hawq-test/start-hdfs.sh      |  39 ++++
 .../centos7-docker/hawq-dev/Dockerfile          |  75 +++++++
 .../centos7-docker/hawq-test/Dockerfile         |  40 ++++
 .../centos7-docker/hawq-test/conf/core-site.xml |  24 ++
 .../centos7-docker/hawq-test/conf/hadoop-env.sh | 110 +++++++++
 .../centos7-docker/hawq-test/entrypoint.sh      |  33 +++
 .../centos7-docker/hawq-test/start-hdfs.sh      |  39 ++++
 14 files changed, 1010 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/Makefile
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/Makefile b/contrib/hawq-docker/Makefile
new file mode 100644
index 0000000..120ebe2
--- /dev/null
+++ b/contrib/hawq-docker/Makefile
@@ -0,0 +1,222 @@
+#!/usr/bin/make all
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+THIS_MAKEFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
+TOP_DIR := $(abspath $(dir ${THIS_MAKEFILE_PATH}))
+NDATANODES := 3
+CUR_DATANODE := 1
+OS_VERSION := centos7
+# Do not use underscore "_" in CLUSTER_ID
+CLUSTER_ID := $(OS_VERSION)
+# Mount this local directory to /data in the data container and share it with the other containers
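+# Example (hypothetical path): make LOCAL=/path/to/share run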
+LOCAL := 
+# network used by the docker containers
+NETWORK := $(CLUSTER_ID)_hawq_network
+
+all: 
+	@echo " Usage:"
+	@echo "    To setup a build and test environment:         make run"
+	@echo "    To start all containers:                       make start"
+	@echo "    To stop all containers:                        make stop"
+	@echo "    To remove hdfs containers:                     make clean"
+	@echo "    To remove all containers:                      make distclean"
+	@echo ""
+	@echo "    To build images locally:                       make build"
+	@echo "    To pull latest images:                         make pull"
+
+build:
+	@make -f $(THIS_MAKEFILE_PATH) build-hawq-dev-$(OS_VERSION)
+	@make -f $(THIS_MAKEFILE_PATH) build-hawq-test-$(OS_VERSION)
+	@echo "Build Images Done!"
+
+build-hawq-dev-$(OS_VERSION): $(TOP_DIR)/$(OS_VERSION)-docker/hawq-dev/Dockerfile
+	@echo build hawq-dev:$(OS_VERSION) image
+	docker build -t hawq/hawq-dev:$(OS_VERSION) $(TOP_DIR)/$(OS_VERSION)-docker/hawq-dev/
+
+build-hawq-test-$(OS_VERSION): $(TOP_DIR)/$(OS_VERSION)-docker/hawq-test/Dockerfile
+	@echo build hawq-test:$(OS_VERSION) image
+	docker build -t hawq/hawq-test:$(OS_VERSION) $(TOP_DIR)/$(OS_VERSION)-docker/hawq-test/
+
+create-data-container:
+	@echo create ${CLUSTER_ID}-data container
+	@if [ ! -z "$(LOCAL)" -a ! -d "$(LOCAL)" ]; then \
+		echo "LOCAL must be set to a directory!"; \
+		exit 1; \
+	fi
+	@if [ -z "`docker ps -a --filter="name=${CLUSTER_ID}-data$$" | grep -v CONTAINER`" ]; then \
+		if [ -z "$(LOCAL)" ]; then \
+			docker create -v /data --name=${CLUSTER_ID}-data hawq/hawq-dev:$(OS_VERSION) /bin/true; \
+		else \
+			docker create -v $(LOCAL):/data --name=${CLUSTER_ID}-data hawq/hawq-dev:$(OS_VERSION) /bin/true; \
+		fi \
+	else \
+		echo "${CLUSTER_ID}-data container already exists!"; \
+	fi
+
+run:
+	@if [ -z "`docker network ls 2>/dev/null`" ]; then \
+ 		make -f $(THIS_MAKEFILE_PATH) NETWORK=default create-data-container && \
+		make -f $(THIS_MAKEFILE_PATH) NETWORK=default run-hdfs; \
+	else \
+		if [ -z "`docker network ls 2>/dev/null | grep $(NETWORK)`" ]; then \
+			echo create network $(NETWORK) && \
+			docker network create --driver bridge $(NETWORK); \
+		fi && \
+		make -f $(THIS_MAKEFILE_PATH) create-data-container && \
+		make -f $(THIS_MAKEFILE_PATH) run-hdfs; \
+	fi
+
+run-hdfs:
+	@make -f $(THIS_MAKEFILE_PATH) run-namenode-container
+	@i=1; \
+	while [ $$i -le $(NDATANODES) ] ; do \
+		make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i run-datanode-container; \
+		i=$$((i+1)); \
+	done
+	@echo "HAWQ Environment Setup Done!"
+	@echo 'run "docker exec -it ${CLUSTER_ID}-namenode bash" to attach to ${CLUSTER_ID}-namenode node'
+
+run-namenode-container:
+	@echo "run ${CLUSTER_ID}-namenode container"
+	@if [ -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
+		docker run --privileged -itd --net=$(NETWORK) --hostname=${CLUSTER_ID}-namenode --name=${CLUSTER_ID}-namenode \
+			--volumes-from ${CLUSTER_ID}-data hawq/hawq-test:$(OS_VERSION); \
+	else \
+		echo "${CLUSTER_ID}-namenode container already exists!"; \
+	fi
+
+run-datanode-container:
+	@echo "run ${CLUSTER_ID}-datanode$(CUR_DATANODE) container"
+	@if [ -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \
+		docker run --privileged -itd --net=$(NETWORK) --hostname=${CLUSTER_ID}-datanode$(CUR_DATANODE) \
+			--name=${CLUSTER_ID}-datanode$(CUR_DATANODE) -e NAMENODE=${CLUSTER_ID}-namenode \
+			--volumes-from ${CLUSTER_ID}-data hawq/hawq-test:$(OS_VERSION); \
+	else \
+		echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container already exists!"; \
+	fi
+
+start:
+	@make -f $(THIS_MAKEFILE_PATH) start-hdfs
+	@echo 'run "docker exec -it ${CLUSTER_ID}-namenode bash" to attach to ${CLUSTER_ID}-namenode node'
+
+start-hdfs:
+	@make -f $(THIS_MAKEFILE_PATH) start-namenode-container
+	@i=1;\
+	while [ $$i -le $(NDATANODES) ] ; do \
+		make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i start-datanode-container; \
+		i=$$((i+1)); \
+	done
+	@echo "Start All Containers Done!"
+
+start-namenode-container:
+	@echo "start ${CLUSTER_ID}-namenode container"
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
+		docker start ${CLUSTER_ID}-namenode; \
+	else \
+		echo "${CLUSTER_ID}-namenode container does not exist!" && exit 1; \
+	fi
+
+start-datanode-container:
+	@echo "start ${CLUSTER_ID}-datanode$(CUR_DATANODE) container"
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \
+		docker start ${CLUSTER_ID}-datanode$(CUR_DATANODE); \
+	else \
+		echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container does not exist!" && exit 1; \
+	fi
+
+stop:
+	@make -f $(THIS_MAKEFILE_PATH) stop-hdfs
+
+stop-hdfs:
+	@make -f $(THIS_MAKEFILE_PATH) stop-namenode-container
+	@i=1; \
+	while [ $$i -le $(NDATANODES) ] ; do \
+		make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i stop-datanode-container; \
+		i=$$((i+1)); \
+	done
+	@echo "Stop All Containers Done!"
+
+stop-namenode-container:
+	@echo "stop ${CLUSTER_ID}-namenode container"
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
+		docker stop -t 0 ${CLUSTER_ID}-namenode; \
+	else \
+		echo "${CLUSTER_ID}-namenode container does not exist!"; \
+	fi
+
+stop-datanode-container:
+	@echo "stop ${CLUSTER_ID}-datanode$(CUR_DATANODE) container"
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \
+		docker stop -t 0 ${CLUSTER_ID}-datanode$(CUR_DATANODE); \
+	else \
+		echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container does not exist!"; \
+	fi
+
+remove-hdfs:
+	@make -f $(THIS_MAKEFILE_PATH) remove-namenode-container
+	@i=1; \
+	while [ $$i -le $(NDATANODES) ] ; do \
+		make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i remove-datanode-container; \
+		i=$$((i+1)); \
+	done
+	@echo "Remove HDFS Done!"
+
+remove-namenode-container:
+	@echo "remove ${CLUSTER_ID}-namenode container"
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
+		docker rm -v ${CLUSTER_ID}-namenode; \
+	else \
+		echo "${CLUSTER_ID}-namenode container does not exist!"; \
+	fi
+
+remove-datanode-container:
+	@echo "remove ${CLUSTER_ID}-datanode$(CUR_DATANODE) container"
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \
+		docker rm -v ${CLUSTER_ID}-datanode$(CUR_DATANODE); \
+	else \
+		echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container does not exist!"; \
+	fi
+
+remove-data:
+	@echo remove ${CLUSTER_ID}-data container
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-data" | grep -v CONTAINER`" ]; then \
+		docker rm -v ${CLUSTER_ID}-data; \
+	else \
+		echo "${CLUSTER_ID}-data container does not exist!"; \
+	fi
+
+pull:
+	@echo pull latest images
+	docker pull hawq/hawq-dev:$(OS_VERSION)
+	docker pull hawq/hawq-test:$(OS_VERSION)
+
+clean:
+	@make -f $(THIS_MAKEFILE_PATH) stop >/dev/null 2>&1 || true
+	@make -f $(THIS_MAKEFILE_PATH) remove-hdfs >/dev/null 2>&1 || true
+	@echo "Clean Done!"
+
+distclean:
+	@make -f $(THIS_MAKEFILE_PATH) stop >/dev/null 2>&1 || true
+	@make -f $(THIS_MAKEFILE_PATH) remove-hdfs >/dev/null 2>&1 || true
+	@make -f $(THIS_MAKEFILE_PATH) remove-data >/dev/null 2>&1 || true
+	@if [ ! -z "`docker network ls 2>/dev/null | grep $(NETWORK)`" ]; then \
+		echo remove network $(NETWORK); \
+		docker network rm $(NETWORK) >/dev/null 2>&1 || true; \
+	fi
+	@echo "Distclean Done!"

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/README.md
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/README.md b/contrib/hawq-docker/README.md
new file mode 100644
index 0000000..4adeaaf
--- /dev/null
+++ b/contrib/hawq-docker/README.md
@@ -0,0 +1,97 @@
+# hawq-docker
+
+hawq-docker is based on *wangzw's* repo *hawq-devel-env*. It provides the Docker images and scripts that help Apache HAWQ developers set up a build and test environment with Docker.
+
+Both CentOS 7 and CentOS 6 are supported.
+Change the variable **OS_VERSION** (:= centos7 OR centos6) in the Makefile to switch between CentOS 7 and CentOS 6.
+
+The examples below use CentOS 7.
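+To use CentOS 6 instead, either edit **OS_VERSION** in the Makefile or override it on the make command line (standard GNU make behavior; a sketch):
+```
+make OS_VERSION=centos6 pull
+make OS_VERSION=centos6 run
+```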
+
+# Install Docker
+* Follow the instructions to install Docker:
+https://docs.docker.com/
+
+# Setup build and test environment
+* Clone the HAWQ repository
+```
+git clone https://github.com/apache/incubator-hawq.git
+cd incubator-hawq/contrib/hawq-docker
+```
+* Get the docker images
+```
+  make pull (recommended)
+OR
+  make build
+``` 
+(`make pull` fetches prebuilt docker images from Docker Hub, while `make build` builds the docker images locally. In general, `make pull` is faster than `make build`.)
+* Set up a 5-node virtual cluster for Apache HAWQ build and test.
+```
+make run
+```
+Now let's have a look at what we created.
+```
+[root@localhost hawq-docker]# docker ps -a
+CONTAINER ID        IMAGE                          COMMAND                CREATED             STATUS              PORTS               NAMES
+382b2b3360d1        hawq/hawq-test:centos7   "entrypoint.sh bash"   2 minutes ago       Up 2 minutes                            centos7-datanode3
+86513c331d45        hawq/hawq-test:centos7   "entrypoint.sh bash"   2 minutes ago       Up 2 minutes                            centos7-datanode2
+c0ab10e46e4a        hawq/hawq-test:centos7   "entrypoint.sh bash"   2 minutes ago       Up 2 minutes                            centos7-datanode1
+e27beea63953        hawq/hawq-test:centos7   "entrypoint.sh bash"   2 minutes ago       Up 2 minutes                            centos7-namenode
+1f986959bd04        hawq/hawq-dev:centos7    "/bin/true"            2 minutes ago       Created                                 centos7-data
+```
+**centos7-data** is a data container; its volume is mounted at the /data directory in all other containers to provide shared storage for the cluster.
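+To double-check the shared volume, you can inspect the data container (an optional sanity check):
+```
+docker inspect -f '{{ .Mounts }}' centos7-data
+```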
+
+# Build and Test Apache HAWQ
+* Attach to the namenode
+```
+docker exec -it centos7-namenode bash
+```
+* Check whether HDFS is working properly
+```
+sudo -u hdfs hdfs dfsadmin -report
+```
+* Clone the Apache HAWQ code into the /data directory
+```
+git clone https://github.com/apache/incubator-hawq.git /data/hawq
+```
+* Build Apache HAWQ
+```
+cd /data/hawq
+./configure --prefix=/data/hawq-dev
+make
+make install
+```
+(When you are using CentOS 6, run `scl enable devtoolset-2 bash` before
+configuring HAWQ and run `exit` after installing it; see the sketch below.)
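+A sketch of the full CentOS 6 sequence:
+```
+scl enable devtoolset-2 bash
+cd /data/hawq
+./configure --prefix=/data/hawq-dev
+make
+make install
+exit
+```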
+* Modify the Apache HAWQ configuration
+```
+sed 's|localhost|centos7-namenode|g' -i /data/hawq-dev/etc/hawq-site.xml
+echo 'centos7-datanode1' >  /data/hawq-dev/etc/slaves
+echo 'centos7-datanode2' >>  /data/hawq-dev/etc/slaves
+echo 'centos7-datanode3' >>  /data/hawq-dev/etc/slaves
+```
+* Initialize Apache HAWQ cluster
+```
+sudo -u hdfs hdfs dfs -chown gpadmin /
+source /data/hawq-dev/greenplum_path.sh
+hawq init cluster
+```
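+Optionally, verify that the cluster came up (assuming the `hawq state` subcommand provided by this build):
+```
+hawq state
+```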
+Now you can connect to the database with the `psql` command.
+```
+[gpadmin@centos7-namenode data]$ psql -d postgres
+psql (8.2.15)
+Type "help" for help.
+
+postgres=# 
+```
+# More commands with this script
+```
+ Usage:
+    To setup a build and test environment:         make run
+    To start all containers:                       make start
+    To stop all containers:                        make stop
+    To remove hdfs containers:                     make clean
+    To remove all containers:                      make distclean
+    To build images locally:                       make build
+    To pull latest images:                         make pull
+```
+

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/centos6-docker/hawq-dev/Dockerfile
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-dev/Dockerfile b/contrib/hawq-docker/centos6-docker/hawq-dev/Dockerfile
new file mode 100644
index 0000000..9fb8476
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-dev/Dockerfile
@@ -0,0 +1,123 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+FROM centos:6
+
+MAINTAINER Richard Guo <ri...@pivotal.io>
+
+# install all software we need
+RUN yum install -y epel-release && \
+ yum makecache && \
+ yum install -y man passwd sudo tar which git mlocate links make bzip2 \
+ autoconf automake libtool m4 gcc gcc-c++ gdb flex cmake gperf indent \
+ libuuid-devel krb5-devel libgsasl-devel expat-devel libxml2-devel \
+ perl-ExtUtils-Embed pam-devel python-devel snappy-devel \
+ libyaml-devel libevent-devel bzip2-devel openssl-devel \
+ openldap-devel readline-devel net-snmp-devel apr-devel \
+ libesmtp-devel xerces-c-devel python-pip json-c-devel \
+ apache-ivy java-1.7.0-openjdk-devel wget \
+ openssh-clients openssh-server perl-JSON && \
+ yum clean all
+
+# update gcc
+RUN wget -O /etc/yum.repos.d/slc6-devtoolset.repo http://linuxsoft.cern.ch/cern/devtoolset/slc6-devtoolset.repo && \
+ rpm --import http://ftp.scientificlinux.org/linux/scientific/5x/x86_64/RPM-GPG-KEYs/RPM-GPG-KEY-cern && \
+ yum install -y devtoolset-2-gcc devtoolset-2-binutils devtoolset-2-gcc-c++ && \
+ echo "source /opt/rh/devtoolset-2/enable" >> ~/.bashrc && \
+ source ~/.bashrc
+
+# install libcurl 7.45.0
+RUN mkdir -p /tmp/build/ && \
+ cd /tmp/build && curl -L "http://curl.haxx.se/download/curl-7.45.0.tar.bz2" -o curl-7.45.0.tar.bz2 && \
+ tar -xjf curl-7.45.0.tar.bz2 && cd curl-7.45.0 && \
+ ./configure --prefix=/usr && make && make install && \
+ rm -rf /tmp/build && ldconfig
+
+# install maven
+RUN curl -L "http://repos.fedorapeople.org/repos/dchen/apache-maven/epel-apache-maven.repo" -o /etc/yum.repos.d/epel-apache-maven.repo && \
+ yum install -y apache-maven && \
+ yum clean all
+
+# OS requirements
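+# kernel.sem fields: SEMMSL SEMMNS SEMOPM SEMMNI (semaphores per set, system-wide total, ops per semop call, number of sets)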
+RUN echo "kernel.sem = 250 512000 100 2048" >> /etc/sysctl.conf
+
+# setup ssh server and keys for root
+RUN ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa && \
+ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys && \
+ chmod 0600 ~/.ssh/authorized_keys
+
+# setup JAVA_HOME for all users
+RUN echo "#!/bin/sh" > /etc/profile.d/java.sh && \
+ echo "export JAVA_HOME=/etc/alternatives/java_sdk" >> /etc/profile.d/java.sh && \
+ chmod a+x /etc/profile.d/java.sh
+
+# install boost 1.59
+RUN mkdir -p /tmp/build && \
+ cd /tmp/build && curl -L "http://downloads.sourceforge.net/project/boost/boost/1.59.0/boost_1_59_0.tar.bz2" -o boost_1_59_0.tar.bz2 && \
+ tar -xjf boost_1_59_0.tar.bz2 && cd boost_1_59_0 && \
+ ./bootstrap.sh && ./b2 --prefix=/usr -q && ./b2 --prefix=/usr -q install && \
+ rm -rf /tmp/build
+
+# install bison 2.5.1
+RUN mkdir -p /tmp/build/ && \
+ cd /tmp/build && curl -L "ftp://ftp.gnu.org/gnu/bison/bison-2.5.1.tar.gz" -o bison-2.5.1.tar.gz && \
+ tar -xzf bison-2.5.1.tar.gz && cd bison-2.5.1 && \
+ ./configure --prefix=/usr && make && make install && \
+ rm -rf /tmp/build
+
+# install thrift 0.9.1
+RUN mkdir -p /tmp/build && \
+ cd /tmp/build && curl -L "https://archive.apache.org/dist/thrift/0.9.1/thrift-0.9.1.tar.gz" -o thrift-0.9.1.tar.gz && \
+ tar -xf thrift-0.9.1.tar.gz && cd thrift-0.9.1 && \
+ ./configure --prefix=/usr --without-tests && \
+ make && make install && \
+ rm -rf /tmp/build
+
+# install protobuf 2.5.0
+RUN mkdir -p /tmp/build/ && \
+ cd /tmp/build && curl -L "https://github.com/google/protobuf/releases/download/v2.5.0/protobuf-2.5.0.tar.bz2" -o protobuf-2.5.0.tar.bz2 && \
+ tar -xjf protobuf-2.5.0.tar.bz2 && cd protobuf-2.5.0 && \
+ ./configure --prefix=/usr && make && make install && ldconfig && \
+ rm -rf /tmp/build
+
+# install the pycrypto python module
+RUN pip --retries=50 --timeout=300 install pycrypto
+
+# create user gpadmin since HAWQ cannot run under root
+RUN groupadd -g 1000 gpadmin && \
+ useradd -u 1000 -g 1000 gpadmin && \
+ echo "gpadmin  ALL=(ALL)       NOPASSWD: ALL" > /etc/sudoers.d/gpadmin
+
+# sudo should not require tty
+RUN sed -i -e 's|Defaults    requiretty|#Defaults    requiretty|' /etc/sudoers
+
+RUN echo "#!/bin/bash" > /etc/profile.d/user.sh && \
+ echo "export USER=\`whoami\`" >> /etc/profile.d/user.sh && \
+ chmod a+x /etc/profile.d/user.sh
+
+ENV BASEDIR /data
+RUN mkdir -p /data && chmod 777 /data
+
+USER gpadmin
+
+# setup ssh client keys for gpadmin
+RUN ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa && \
+ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys && \
+ chmod 0600 ~/.ssh/authorized_keys
+
+WORKDIR /data
+

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/centos6-docker/hawq-test/Dockerfile
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/Dockerfile b/contrib/hawq-docker/centos6-docker/hawq-test/Dockerfile
new file mode 100644
index 0000000..94a04fe
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/Dockerfile
@@ -0,0 +1,40 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+FROM hawq/hawq-dev:centos6
+
+MAINTAINER Richard Guo <ri...@pivotal.io>
+
+USER root
+
+# install HDP 2.5.0
+RUN curl -L "http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0/hdp.repo" -o /etc/yum.repos.d/hdp.repo && \
+ yum install -y hadoop hadoop-hdfs hadoop-libhdfs hadoop-yarn hadoop-mapreduce hadoop-client hdp-select && \
+ yum clean all
+
+RUN ln -s /usr/hdp/current/hadoop-hdfs-namenode/../hadoop/sbin/hadoop-daemon.sh /usr/bin/hadoop-daemon.sh
+
+COPY conf/* /etc/hadoop/conf/
+
+COPY entrypoint.sh /usr/bin/entrypoint.sh
+COPY start-hdfs.sh /usr/bin/start-hdfs.sh
+
+USER gpadmin
+
+ENTRYPOINT ["entrypoint.sh"]
+CMD ["bash"]
+

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/centos6-docker/hawq-test/conf/core-site.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/core-site.xml b/contrib/hawq-docker/centos6-docker/hawq-test/conf/core-site.xml
new file mode 100644
index 0000000..afc37fc
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/core-site.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
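+	<!-- ${hdfs.namenode} is resolved from the JVM system property set in hadoop-env.sh via HADOOP_OPTS="-Dhdfs.namenode=${NAMENODE}" -->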
+	<property>
+		<name>fs.defaultFS</name>
+		<value>hdfs://${hdfs.namenode}:8020</value>
+	</property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.sh b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.sh
new file mode 100644
index 0000000..95511ed
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.sh
@@ -0,0 +1,110 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.
+export JAVA_HOME=/etc/alternatives/java_sdk
+
+# The jsvc implementation to use. Jsvc is required to run secure datanodes
+# that bind to privileged ports to provide authentication of data transfer
+# protocol.  Jsvc is not required if SASL is configured for authentication of
+# data transfer protocol using non-privileged ports.
+#export JSVC_HOME=${JSVC_HOME}
+
+#export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
+
+# Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
+#for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
+#  if [ "$HADOOP_CLASSPATH" ]; then
+#    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
+#  else
+#    export HADOOP_CLASSPATH=$f
+#  fi
+#done
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+#export HADOOP_HEAPSIZE=
+#export HADOOP_NAMENODE_INIT_HEAPSIZE=""
+
+# Setup environment variable for docker image
+if [ -f /etc/profile.d/hadoop.sh ]; then
+  . /etc/profile.d/hadoop.sh
+fi
+
+if [ -z "${NAMENODE}" ]; then
+  echo "environment variable NAMENODE is not set!"
+  exit 1
+fi
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Dhdfs.namenode=${NAMENODE}"
+#export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
+
+# Command specific options appended to HADOOP_OPTS when specified
+#export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
+#export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
+
+#export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
+
+#export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
+#export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+#export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
+#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
+
+# On secure datanodes, user to run the datanode as after dropping privileges.
+# This **MUST** be uncommented to enable secure HDFS if using privileged ports
+# to provide authentication of data transfer protocol.  This **MUST NOT** be
+# defined if SASL is configured for authentication of data transfer protocol
+# using non-privileged ports.
+#export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR=/var/log/hadoop
+export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
+
+# Where log files are stored in the secure data environment.
+#export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
+
+###
+# HDFS Mover specific parameters
+###
+# Specify the JVM options to be used when starting the HDFS Mover.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HADOOP_MOVER_OPTS=""
+
+###
+# Advanced Users Only!
+###
+
+# The directory where pid files are stored. /tmp by default.
+# NOTE: this should be set to a directory that can only be written to by
+#       the user that will run the hadoop daemons.  Otherwise there is the
+#       potential for a symlink attack.
+#export HADOOP_PID_DIR=${HADOOP_PID_DIR}
+#export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
+
+# A string representing this instance of hadoop. $USER by default.
+#export HADOOP_IDENT_STRING=$USER

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/centos6-docker/hawq-test/entrypoint.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/entrypoint.sh b/contrib/hawq-docker/centos6-docker/hawq-test/entrypoint.sh
new file mode 100755
index 0000000..2c03287
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/entrypoint.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+if [ -z "${NAMENODE}" ]; then
+  export NAMENODE=${HOSTNAME}
+fi
+
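+# Persist NAMENODE so that login shells and hadoop-env.sh (which sources this file) can pick it up.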
+if [ ! -f /etc/profile.d/hadoop.sh ]; then
+  echo '#!/bin/bash' | sudo tee /etc/profile.d/hadoop.sh
+  echo "export NAMENODE=${NAMENODE}" | sudo tee -a /etc/profile.d/hadoop.sh
+  sudo chmod a+x /etc/profile.d/hadoop.sh
+fi
+
+sudo start-hdfs.sh
+sudo sysctl -p
+sudo ln -s /usr/lib/libthrift-0.9.1.so /usr/lib64/libthrift-0.9.1.so
+
+exec "$@"

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/centos6-docker/hawq-test/start-hdfs.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/start-hdfs.sh b/contrib/hawq-docker/centos6-docker/hawq-test/start-hdfs.sh
new file mode 100755
index 0000000..076fb0a
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/start-hdfs.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+/etc/init.d/sshd start
+
+if [ -f /etc/profile.d/hadoop.sh ]; then
+  . /etc/profile.d/hadoop.sh
+fi
+
+if [ "${NAMENODE}" == "${HOSTNAME}" ]; then
+  if [ ! -d /tmp/hdfs/name/current ]; then
+    su -l hdfs -c "hdfs namenode -format"
+  fi
+  
+  if [ -z "`ps aux | grep org.apache.hadoop.hdfs.server.namenode.NameNode | grep -v grep`" ]; then
+    su -l hdfs -c "hadoop-daemon.sh start namenode"
+  fi
+else
+  if [ -z "`ps aux | grep org.apache.hadoop.hdfs.server.datanode.DataNode | grep -v grep`" ]; then
+    su -l hdfs -c "hadoop-daemon.sh start datanode"
+  fi
+fi
+

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/centos7-docker/hawq-dev/Dockerfile
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-dev/Dockerfile b/contrib/hawq-docker/centos7-docker/hawq-dev/Dockerfile
new file mode 100644
index 0000000..58d4ef0
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-dev/Dockerfile
@@ -0,0 +1,75 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+FROM centos:7
+
+MAINTAINER Richard Guo <ri...@pivotal.io>
+
+# install all software we need
+RUN yum install -y epel-release && \
+ yum makecache && \
+ yum install -y man passwd sudo tar which git mlocate links make bzip2 net-tools \
+ autoconf automake libtool m4 gcc gcc-c++ gdb bison flex cmake gperf maven indent \
+ libuuid-devel krb5-devel libgsasl-devel expat-devel libxml2-devel \
+ perl-ExtUtils-Embed pam-devel python-devel libcurl-devel snappy-devel \
+ thrift-devel libyaml-devel libevent-devel bzip2-devel openssl-devel \
+ openldap-devel protobuf-devel readline-devel net-snmp-devel apr-devel \
+ libesmtp-devel python-pip json-c-devel \
+ java-1.7.0-openjdk-devel lcov cmake \
+ openssh-clients openssh-server perl-JSON && \
+ yum clean all
+
+RUN pip --retries=50 --timeout=300 install pycrypto
+
+# OS requirement
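+# kernel.sem fields: SEMMSL SEMMNS SEMOPM SEMMNI (semaphores per set, system-wide total, ops per semop call, number of sets)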
+RUN echo "kernel.sem = 250 512000 100 2048" >> /etc/sysctl.conf
+
+# setup ssh server and keys for root
+RUN sshd-keygen && \
+ ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa && \
+ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys && \
+ chmod 0600 ~/.ssh/authorized_keys
+
+# create user gpadmin since HAWQ cannot run under root
+RUN groupadd -g 1000 gpadmin && \
+ useradd -u 1000 -g 1000 gpadmin && \
+ echo "gpadmin  ALL=(ALL)       NOPASSWD: ALL" > /etc/sudoers.d/gpadmin
+
+# sudo should not require tty
+RUN sed -i -e 's|Defaults    requiretty|#Defaults    requiretty|' /etc/sudoers
+
+# setup JAVA_HOME for all users
+RUN echo "#!/bin/sh" > /etc/profile.d/java.sh && \
+ echo "export JAVA_HOME=/etc/alternatives/java_sdk" >> /etc/profile.d/java.sh && \
+ chmod a+x /etc/profile.d/java.sh
+
+# set USER env
+RUN echo "#!/bin/bash" > /etc/profile.d/user.sh && \
+ echo "export USER=\`whoami\`" >> /etc/profile.d/user.sh && \
+ chmod a+x /etc/profile.d/user.sh
+
+ENV BASEDIR /data
+RUN mkdir -p /data && chmod 777 /data
+
+USER gpadmin
+
+# setup ssh client keys for gpadmin
+RUN ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa && \
+ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys && \
+ chmod 0600 ~/.ssh/authorized_keys
+
+WORKDIR /data

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/centos7-docker/hawq-test/Dockerfile
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/Dockerfile b/contrib/hawq-docker/centos7-docker/hawq-test/Dockerfile
new file mode 100644
index 0000000..ea5e22c
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/Dockerfile
@@ -0,0 +1,40 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+FROM hawq/hawq-dev:centos7
+
+MAINTAINER Richard Guo <ri...@pivotal.io>
+
+USER root
+
+# install HDP 2.5.0
+RUN curl -L "http://public-repo-1.hortonworks.com/HDP/centos7/2.x/updates/2.5.0.0/hdp.repo" -o /etc/yum.repos.d/hdp.repo && \
+ yum install -y hadoop hadoop-hdfs hadoop-libhdfs hadoop-yarn hadoop-mapreduce hadoop-client hdp-select && \
+ yum clean all
+
+RUN ln -s /usr/hdp/current/hadoop-hdfs-namenode/../hadoop/sbin/hadoop-daemon.sh /usr/bin/hadoop-daemon.sh
+
+COPY conf/* /etc/hadoop/conf/
+
+COPY entrypoint.sh /usr/bin/entrypoint.sh
+COPY start-hdfs.sh /usr/bin/start-hdfs.sh
+
+USER gpadmin
+
+ENTRYPOINT ["entrypoint.sh"]
+CMD ["bash"]
+

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/centos7-docker/hawq-test/conf/core-site.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/core-site.xml b/contrib/hawq-docker/centos7-docker/hawq-test/conf/core-site.xml
new file mode 100644
index 0000000..afc37fc
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/core-site.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
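+	<!-- ${hdfs.namenode} is resolved from the JVM system property set in hadoop-env.sh via HADOOP_OPTS="-Dhdfs.namenode=${NAMENODE}" -->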
+	<property>
+		<name>fs.defaultFS</name>
+		<value>hdfs://${hdfs.namenode}:8020</value>
+	</property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-env.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-env.sh b/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-env.sh
new file mode 100644
index 0000000..95511ed
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-env.sh
@@ -0,0 +1,110 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.
+export JAVA_HOME=/etc/alternatives/java_sdk
+
+# The jsvc implementation to use. Jsvc is required to run secure datanodes
+# that bind to privileged ports to provide authentication of data transfer
+# protocol.  Jsvc is not required if SASL is configured for authentication of
+# data transfer protocol using non-privileged ports.
+#export JSVC_HOME=${JSVC_HOME}
+
+#export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
+
+# Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
+#for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
+#  if [ "$HADOOP_CLASSPATH" ]; then
+#    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
+#  else
+#    export HADOOP_CLASSPATH=$f
+#  fi
+#done
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+#export HADOOP_HEAPSIZE=
+#export HADOOP_NAMENODE_INIT_HEAPSIZE=""
+
+# Setup environment variable for docker image
+if [ -f /etc/profile.d/hadoop.sh ]; then
+  . /etc/profile.d/hadoop.sh
+fi
+
+if [ -z "${NAMENODE}" ]; then
+  echo "environment variable NAMENODE is not set!"
+  exit 1
+fi
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Dhdfs.namenode=${NAMENODE}"
+#export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
+
+# Command specific options appended to HADOOP_OPTS when specified
+#export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
+#export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
+
+#export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
+
+#export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
+#export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+#export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
+#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
+
+# On secure datanodes, user to run the datanode as after dropping privileges.
+# This **MUST** be uncommented to enable secure HDFS if using privileged ports
+# to provide authentication of data transfer protocol.  This **MUST NOT** be
+# defined if SASL is configured for authentication of data transfer protocol
+# using non-privileged ports.
+#export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR=/var/log/hadoop
+export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
+
+# Where log files are stored in the secure data environment.
+#export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
+
+###
+# HDFS Mover specific parameters
+###
+# Specify the JVM options to be used when starting the HDFS Mover.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HADOOP_MOVER_OPTS=""
+
+###
+# Advanced Users Only!
+###
+
+# The directory where pid files are stored. /tmp by default.
+# NOTE: this should be set to a directory that can only be written to by
+#       the user that will run the hadoop daemons.  Otherwise there is the
+#       potential for a symlink attack.
+#export HADOOP_PID_DIR=${HADOOP_PID_DIR}
+#export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
+
+# A string representing this instance of hadoop. $USER by default.
+#export HADOOP_IDENT_STRING=$USER

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/centos7-docker/hawq-test/entrypoint.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/entrypoint.sh b/contrib/hawq-docker/centos7-docker/hawq-test/entrypoint.sh
new file mode 100755
index 0000000..abdc508
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/entrypoint.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+if [ -z "${NAMENODE}" ]; then
+  export NAMENODE=${HOSTNAME}
+fi
+
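+# Persist NAMENODE so that login shells and hadoop-env.sh (which sources this file) can pick it up.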
+if [ ! -f /etc/profile.d/hadoop.sh ]; then
+  echo '#!/bin/bash' | sudo tee /etc/profile.d/hadoop.sh
+  echo "export NAMENODE=${NAMENODE}" | sudo tee -a /etc/profile.d/hadoop.sh
+  sudo chmod a+x /etc/profile.d/hadoop.sh
+fi
+
+sudo start-hdfs.sh
+sudo sysctl -p
+
+exec "$@"

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/centos7-docker/hawq-test/start-hdfs.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/start-hdfs.sh b/contrib/hawq-docker/centos7-docker/hawq-test/start-hdfs.sh
new file mode 100755
index 0000000..f39200d
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/start-hdfs.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+/usr/sbin/sshd
+
+if [ -f /etc/profile.d/hadoop.sh ]; then
+  . /etc/profile.d/hadoop.sh
+fi
+
+if [ "${NAMENODE}" == "${HOSTNAME}" ]; then
+  if [ ! -d /tmp/hdfs/name/current ]; then
+    su -l hdfs -c "hdfs namenode -format"
+  fi
+  
+  if [ -z "`ps aux | grep org.apache.hadoop.hdfs.server.namenode.NameNode | grep -v grep`" ]; then
+    su -l hdfs -c "hadoop-daemon.sh start namenode"
+  fi
+else
+  if [ -z "`ps aux | grep org.apache.hadoop.hdfs.server.datanode.DataNode | grep -v grep`" ]; then
+    su -l hdfs -c "hadoop-daemon.sh start datanode"
+  fi
+fi
+