Posted to commits@hawq.apache.org by wl...@apache.org on 2015/12/17 05:48:15 UTC

[2/2] incubator-hawq git commit: HAWQ-186. Remove some useless files

HAWQ-186. Remove some useless files


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/10909db4
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/10909db4
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/10909db4

Branch: refs/heads/master
Commit: 10909db4a2f0a20121f14e666eb0e253547b9ecb
Parents: d9e1578
Author: Wen Lin <wl...@pivotal.io>
Authored: Thu Dec 17 12:55:30 2015 +0800
Committer: Wen Lin <wl...@pivotal.io>
Committed: Thu Dec 17 12:55:30 2015 +0800

----------------------------------------------------------------------
 depends/libyarn/releng/Makefile                 | 235 --------
 depends/libyarn/releng/bin/cleanup-phd.sh       |  43 --
 depends/libyarn/releng/bin/gphdinst.sh          | 536 -------------------
 depends/libyarn/releng/bin/ha_failover.sh       |  36 --
 depends/libyarn/releng/bin/install-phd.sh       |  71 ---
 depends/libyarn/releng/bin/setup_gphdinst.sh    | 175 ------
 .../bin/templates/.core-site-secure-ha.xml.swp  | Bin 12288 -> 0 bytes
 .../releng/bin/templates/core-site-ha.xml       |  28 -
 .../bin/templates/core-site-secure-ha.xml       |  39 --
 .../releng/bin/templates/core-site-secure.xml   |  34 --
 .../libyarn/releng/bin/templates/core-site.xml  |  24 -
 .../releng/bin/templates/download-config        |   5 -
 .../releng/bin/templates/hadoop-env-ha.sh       |  72 ---
 .../releng/bin/templates/hadoop-env-secure.sh   |  73 ---
 .../libyarn/releng/bin/templates/hadoop-env.sh  |  72 ---
 .../templates/hdfs-site-datanode-secure-ha.xml  | 240 ---------
 .../bin/templates/hdfs-site-datanode-secure.xml | 193 -------
 .../releng/bin/templates/hdfs-site-ha.xml       | 172 ------
 .../templates/hdfs-site-namenode-secure-ha.xml  | 224 --------
 .../bin/templates/hdfs-site-namenode-secure.xml | 181 -------
 .../bin/templates/hdfs-site-secure-ha.xml       | 240 ---------
 .../releng/bin/templates/hdfs-site-secure.xml   | 193 -------
 .../libyarn/releng/bin/templates/hdfs-site.xml  | 123 -----
 .../releng/bin/templates/log4j-ha.properties    |  75 ---
 .../libyarn/releng/bin/templates/yarn-env.sh    |  87 ---
 .../libyarn/releng/bin/templates/yarn-site.xml  | 112 ----
 depends/libyarn/releng/bin/templates/zoo.cfg    |   8 -
 depends/libyarn/releng/etc/ivy.xml              |  38 --
 depends/libyarn/releng/etc/local/.gitignore     |   1 -
 .../releng/etc/local/hdfs-site.xml.simple       |  65 ---
 .../libyarn/releng/make/dependencies/.gitignore |   1 -
 .../libyarn/releng/make/dependencies/build.xml  |  84 ---
 .../libyarn/releng/make/dependencies/cacerts    | Bin 83678 -> 0 bytes
 .../releng/make/dependencies/ivy-report.css     | 279 ----------
 depends/libyarn/releng/make/dependencies/ivy.sh |  70 ---
 .../libyarn/releng/make/dependencies/ivy.xml    |  42 --
 .../releng/make/dependencies/ivy_util.sh        |  60 ---
 .../releng/make/dependencies/ivysettings.xml    |  68 ---
 depends/libyarn/releng/publish/ivy.xml          |  36 --
 depends/libyarn/releng/publish/ivysettings.xml  |  56 --
 depends/libyarn/releng/set_bld_arch.sh          |  63 ---
 src/port/win32.ico                              | Bin 22486 -> 0 bytes
 tools/bin/ext/yaml/__init__.pyc                 | Bin 11360 -> 0 bytes
 tools/bin/ext/yaml/composer.pyc                 | Bin 4204 -> 0 bytes
 tools/bin/ext/yaml/constructor.pyc              | Bin 22333 -> 0 bytes
 tools/bin/ext/yaml/cyaml.pyc                    | Bin 3846 -> 0 bytes
 tools/bin/ext/yaml/dumper.pyc                   | Bin 2614 -> 0 bytes
 tools/bin/ext/yaml/emitter.pyc                  | Bin 32743 -> 0 bytes
 tools/bin/ext/yaml/error.pyc                    | Bin 3040 -> 0 bytes
 tools/bin/ext/yaml/events.pyc                   | Bin 5094 -> 0 bytes
 tools/bin/ext/yaml/loader.pyc                   | Bin 1925 -> 0 bytes
 tools/bin/ext/yaml/nodes.pyc                    | Bin 2236 -> 0 bytes
 tools/bin/ext/yaml/parser.pyc                   | Bin 14891 -> 0 bytes
 tools/bin/ext/yaml/reader.pyc                   | Bin 6802 -> 0 bytes
 tools/bin/ext/yaml/representer.pyc              | Bin 15117 -> 0 bytes
 tools/bin/ext/yaml/resolver.pyc                 | Bin 6782 -> 0 bytes
 tools/bin/ext/yaml/scanner.pyc                  | Bin 34012 -> 0 bytes
 tools/bin/ext/yaml/serializer.pyc               | Bin 4430 -> 0 bytes
 tools/bin/ext/yaml/tokens.pyc                   | Bin 6678 -> 0 bytes
 59 files changed, 4154 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/10909db4/depends/libyarn/releng/Makefile
----------------------------------------------------------------------
diff --git a/depends/libyarn/releng/Makefile b/depends/libyarn/releng/Makefile
deleted file mode 100644
index 851f819..0000000
--- a/depends/libyarn/releng/Makefile
+++ /dev/null
@@ -1,235 +0,0 @@
-## ======================================================================
-## 
-## ======================================================================
-
-BLD_TOP:=$(shell sh -c pwd)
-SHELL=/bin/bash
-
-##-------------------------------------------------------------------------------------
-## dependent modules
-##
-## NOTE: Dependent project module version is kept in $(BLD_TOP)/make/dependencies/ivy.xml
-##-------------------------------------------------------------------------------------
-
-GREP_SED_VAR = $(BLD_TOP)/make/dependencies/ivy.xml | sed -e 's|\(.*\)rev="\(.*\)"[ ]*conf\(.*\)|\2|'
-CMAKE_VER  = $(shell grep "\"cmake\"" $(GREP_SED_VAR))
-
-TARGET_VERSION = ${LIBHDFS3_VERSION}
-
-ifeq "$(LIBHDFS3_VERSION)" ""
-TARGET_VERSION = `cat ${BLD_TOP}/../build/version`
-endif
-
-ifeq "$(LIBHDFS3_NOW)" ""
-TARGET_TIME = `date +%Y%m%d%H%M%S`
-endif
-
-ifeq "$(LIBHDFS3_TEST_URL)" ""
-LIBHDFS3_TEST_URL = localhost:9000
-endif
-
-ifneq "$(LIBYARN_PUBLISH_OVERWRITE)" ""
-LIBYARN_PUBLISH_OVERWRITE = -overwrite
-endif
-
-##-------------------------------------------------------------------------------------
-
-# Determine the default target architecture on this machine, if override not set
-ifeq "$(BLD_ARCH)" ""
-export BLD_ARCH:=$(shell $(BLD_TOP)/set_bld_arch.sh)
-endif
-
-export PATH:=${BLD_TOP}/../ext/${BLD_ARCH}/bin:${BLD_TOP}/../ext/${BLD_ARCH}/cmake-${CMAKE_VER}/bin:${PATH}
-
-# construct test directory prefix on hdfs.
-CUR_DATE=$(shell date +%s)
-TEST_HDFS_PREFIX="${BLD_ARCH}/${CUR_DATE}/"
-
-# prepare HDFS instance
-ifeq "${HDFS_CONFIG}" ""
-export HDFS_CONFIG:=local
-endif
-
-export HDFS_CONFIG_PATH:=${BLD_TOP}/etc/${HDFS_CONFIG}/hdfs-site.xml
-
-ifeq "${HDFS_USER}" ""
-export HDFS_USER:=${USER}
-endif
-
-ifeq "${DATA_PATH}" ""
-export DATA_PATH:=/tmp/libyarn-pulse
-endif
-
-ifneq "${LIBHDFS_HDFS_SUPERUSER}" ""
-export HDFS_SUPERUSER_OPTION:=-DHDFS_SUPERUSER=${LIBHDFS_HDFS_SUPERUSER}
-endif
-
-# include thirdparty infrastructure which depends on WHERE_THE...
-BLD_WHERE_THE_LIBRARY_THINGS_ARE=LD_LIBRARY_PATH
-osx106_x86_WHERE_THE_LIBRARY_THINGS_ARE=DYLD_LIBRARY_PATH
-ifneq "$($(BLD_ARCH)_WHERE_THE_LIBRARY_THINGS_ARE)" ""
-BLD_WHERE_THE_LIBRARY_THINGS_ARE=$($(BLD_ARCH)_WHERE_THE_LIBRARY_THINGS_ARE)
-endif
-
-SED_INLINE_EDIT_BAKUP_FILE=
-
-ifeq "$(BLD_ARCH)" "osx106_x86"
-	SED_INLINE_EDIT_BAKUP_FILE=''
-	export $(BLD_WHERE_THE_LIBRARY_THINGS_ARE):=/opt/gcc-4.4.2-osx106/lib:${BLD_TOP}/../ext/${BLD_ARCH}/lib:$($(BLD_WHERE_THE_LIBRARY_THINGS_ARE))
-else
-	export $(BLD_WHERE_THE_LIBRARY_THINGS_ARE):=/opt/gcc-4.4.2/lib64:${BLD_TOP}/../ext/${BLD_ARCH}/lib:$($(BLD_WHERE_THE_LIBRARY_THINGS_ARE))
-endif
-
-##-------------------
-## set compiler
-##-------------------
-
-CCOMPILER=${CC}
-CXXCOMPILER=${CXX}
-
-ifeq "${CCOMPILER}" ""
-CCOMPILER=gcc
-endif
-
-ifeq "${CXXCOMPILER}" ""
-CXXCOMPILER=g++
-endif
-
-## ----------------------------------------------------------------------
-## libhdfs targets
-## ----------------------------------------------------------------------
-
-all: libhdfs-sync_tools libhdfs-clean libhdfs-configure libhdfs-build libhdfs-unittest
-
-${BLD_TOP}/../build:
-	mkdir ${BLD_TOP}/../build
-
-libhdfs-sync_tools:
-	make sync_tools
-
-libhdfs-clean:
-	@rm -rf ${BLD_TOP}/../dist ${BLD_TOP}/../build
-
-libhdfs-configure: ${BLD_TOP}/../build
-	(cd ${BLD_TOP}/../build && cmake ${BLD_TOP}/.. -DTEST_HDFS_PREFIX=${TEST_HDFS_PREFIX} $(LIBHDFS3_CONF_FLAGS) \
-													-DCMAKE_C_COMPILER=${CCOMPILER} -DCMAKE_CXX_COMPILER=${CXXCOMPILER} \
-	                                                -DCMAKE_INSTALL_PREFIX=${BLD_TOP}/../dist/${BLD_ARCH} \
-                                                    -DCMAKE_PREFIX_PATH=${BLD_TOP}/../ext/${BLD_ARCH} \
-                                                    ${HDFS_SUPERUSER_OPTION})
-
-libhdfs-build:
-	(cd ${BLD_TOP}/../build && make)
-
-libhdfs-install:
-	rm -rf ${BLD_TOP}/../dist/${BLD_ARCH}
-	(cd ${BLD_TOP}/../build && make install)
-
-libhdfs-test:
-	(cd ${BLD_TOP}/../build && make test)
-	
-libhdfs-showcov:
-	(cd ${BLD_TOP}/../build && make ShowCoverage)
-
-libhdfs-functiontest:
-	(export GTEST_OUTPUT="xml:functiontest.xml"; cd ${BLD_TOP}/../build && make functiontest)
-
-libhdfs-securetest:
-	(export GTEST_OUTPUT="xml:securetest.xml"; cd ${BLD_TOP}/../build && make securetest)
-
-libhdfs-unittest:
-	(export GTEST_OUTPUT="xml:unittest.xml"; cd ${BLD_TOP}/../build && make unittest)
-
-libhdfs-update-function-test.xml:
-	(cd ${BLD_TOP}/../test/data && chmod a+w function-test.xml && sed -e 's|localhost:9000|${LIBHDFS3_TEST_URL}|' -i ${SED_INLINE_EDIT_BAKUP_FILE} function-test.xml)
-	(cd ${BLD_TOP}/../test/data && chmod a+w function-secure.xml && sed -e 's|localhost:9000|${LIBHDFS3_TEST_URL}|' -i ${SED_INLINE_EDIT_BAKUP_FILE} function-secure.xml)
-
-libhdfs-package: libhdfs-install
-	(cd ${BLD_TOP}/../dist/${BLD_ARCH} && find . -type f -or -type l | xargs ${MD5} > /tmp/checksums.libyarn && mv /tmp/checksums.libyarn ${BLD_TOP}/../dist/${BLD_ARCH} && \
-	 cd ${BLD_TOP}/../dist/ && tar -czvf libyarn-${BLD_ARCH}-${TARGET_VERSION}.targz ${BLD_ARCH} )
-
-libhdfs-update-ivys:
-	(cd ${BLD_TOP}/../dist/ && mkdir -p ${TARGET_VERSION}/ivys && cp ${BLD_TOP}/etc/ivy.xml ${TARGET_VERSION}/ivys/ivy-${TARGET_VERSION}.xml \
-		&& sed -e 's|<version>|'${TARGET_VERSION}'|' -i ${SED_INLINE_EDIT_BAKUP_FILE} ${TARGET_VERSION}/ivys/ivy-${TARGET_VERSION}.xml \
-		&& sed -e 's|<time>|'${TARGET_TIME}'|' -i ${SED_INLINE_EDIT_BAKUP_FILE} ${TARGET_VERSION}/ivys/ivy-${TARGET_VERSION}.xml)
-
-libhdfs-ivy-package: libhdfs-package libhdfs-update-ivys
-	(cd ${BLD_TOP}/../dist/ && mkdir -p ${TARGET_VERSION}/targzs && cp libyarn-${BLD_ARCH}-${TARGET_VERSION}.targz ${TARGET_VERSION}/targzs/ && tar -cjvf libyarn-${BLD_ARCH}-${TARGET_VERSION}.tar.bz2 ${TARGET_VERSION})
-
-libhdfs-update-hdfs-conf:
-	(cd ${BLD_TOP}/etc/${HDFS_CONFIG} && cp -f hdfs-site.xml.simple hdfs-site.xml && sed -e 's|@user@|${HDFS_USER}|' -i ${SED_INLINE_EDIT_BAKUP_FILE} hdfs-site.xml \
-		&& mkdir -p ${DATA_PATH} && sed -e 's|@data_path@|${DATA_PATH}|' -i ${SED_INLINE_EDIT_BAKUP_FILE} hdfs-site.xml)
-
-libhdfs-cleanup-phd:
-	(${BLD_TOP}/bin/cleanup-phd.sh)
-
-libhdfs-install-phd: libhdfs-update-hdfs-conf
-	(${BLD_TOP}/bin/install-phd.sh)
-
-## ----------------------------------------------------------------------
-## Sync/Clean tools
-## ----------------------------------------------------------------------
-## Populate/clean up dependent releng supported tools.  The projects are
-## downloaded and installed into /opt/releng/...
-##
-## Tool dependencies and platform config mappings are defined in:
-##   * Apache Ivy dependency definition file
-##       releng/make/dependencies/ivy.xml
-## ----------------------------------------------------------------------
-
-opt_write_test:
-	@if [ ! -w /opt ]; then \
-	    echo ""; \
-	    echo "======================================================================"; \
-	    echo "ERROR: /opt is not writable."; \
-	    echo "----------------------------------------------------------------------"; \
-	    echo "  Supporting tools are stored in /opt.  Please ensure you have"; \
-	    echo "  write access to /opt"; \
-	    echo "======================================================================"; \
-	    echo ""; \
-	    exit 1; \
-	fi
-
-/opt/releng/apache-ant: 
-	${MAKE} opt_write_test
-	echo "Sync Ivy project dependency management framework ..."
-	curl --silent http://build-prod.dh.greenplum.com/tools/apache-ant.1.8.1.tar.gz -o /tmp/apache-ant.1.8.1.tar.gz
-	( umask 002; [ ! -d /opt/releng ] && mkdir -p /opt/releng; \
-	   cd /opt/releng; \
-	   gunzip -qc /tmp/apache-ant.1.8.1.tar.gz | tar xf -; \
-	   rm -f /tmp/apache-ant.1.8.1.tar.gz; \
-	   chmod -R a+w /opt/releng/apache-ant )
-
-sync_tools: opt_write_test /opt/releng/apache-ant
-	@cd make/dependencies; \
-	 (umask 002; /opt/releng/apache-ant/bin/ant -DBLD_ARCH=$(BLD_ARCH) resolve); \
-	 echo "Resolve finished"
-
-clean_tools: opt_write_test
-	@cd releng/make/dependencies; \
-	/opt/releng/apache-ant/bin/ant clean; \
-	rm -rf /opt/releng/apache-ant; \
-
-# Publish Libhdfs
-local_publish: /opt/releng/apache-ant
-	@if [ -f publish/ivy.xml ]; then \
-		java -Xmx512M -jar /opt/releng/apache-ant/lib/ivy-2.2.0.jar \
-			-settings publish/ivysettings.xml \
-			-ivy publish/ivy.xml \
-			-publish local \
-			-publishpattern "../dist/[artifact]-[revision].[ext]" \
-			-revision ${TARGET_VERSION} \
-			-status release \
-			-overwrite; \
-			fi; 
-public_publish: /opt/releng/apache-ant
-	@if [ -f publish/ivy.xml ]; then \
-		java -Xmx512M -Djavax.net.ssl.trustStore=$(BLD_TOP)/make/dependencies/cacerts \
-			-jar /opt/releng/apache-ant/lib/ivy-2.2.0.jar \
-			-settings publish/ivysettings.xml \
-			-ivy publish/ivy.xml \
-			-publish public \
-			-publishpattern "../dist/[artifact]-[revision].[ext]" \
-			-revision ${TARGET_VERSION} \
-			-status release \
-			$(LIBYARN_PUBLISH_OVERWRITE); \
-	fi;

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/10909db4/depends/libyarn/releng/bin/cleanup-phd.sh
----------------------------------------------------------------------
diff --git a/depends/libyarn/releng/bin/cleanup-phd.sh b/depends/libyarn/releng/bin/cleanup-phd.sh
deleted file mode 100644
index 7a9c2ff..0000000
--- a/depends/libyarn/releng/bin/cleanup-phd.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/sh
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-if [ "x${DATA_PATH}" != "x" ]; then
-	if [ -e ${DATA_PATH} ]; then
-		echo "dump namenode log"
-		cat ${DATA_PATH}/hadoop*/logs/*namenode*log*
-		echo "dump datanode log"
-		cat ${DATA_PATH}/hadoop*/logs/*datanode*log*
-	fi 
-
-	if [ "x${KEEP_LOG}" != "xtrue" ]; then
-		rm -rf ${DATA_PATH}/*
-	fi
-
-	rm -rf ${DATA_PATH}/fs
-	rm -rf ${DATA_PATH}/data
-fi
-
-NAMENODE_PIDS=`ps aux | grep java | grep NameNode | awk '{print $2}'`
-if [ "x${NAMENODE_PIDS}" != "x" ]; then
-	kill -9 ${NAMENODE_PIDS}
-fi
-
-DATANODE_PIDS=`ps aux | grep java | grep DataNode | awk '{print $2}'`
-if [ "x${DATANODE_PIDS}" != "x" ]; then
-	kill -9 ${DATANODE_PIDS}
-fi

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/10909db4/depends/libyarn/releng/bin/gphdinst.sh
----------------------------------------------------------------------
diff --git a/depends/libyarn/releng/bin/gphdinst.sh b/depends/libyarn/releng/bin/gphdinst.sh
deleted file mode 100644
index 99b7085..0000000
--- a/depends/libyarn/releng/bin/gphdinst.sh
+++ /dev/null
@@ -1,536 +0,0 @@
-#!/bin/bash
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-## ======================================================================
-## TODO
-## * make script work for single and muli-node installs
-## * should we be able to identify the usage of the hdfs instance
-## * need to determine how to setup/start httpd
-## * how to pass in server name values
-## ======================================================================
-
-# Do not run on gpdb9.rel.dh.greenplum.com as this script will delete its shared HDFS cluster
-if [[ $(hostname) =~ "gpdb9.rel" ]]; then
-    echo "ABORT! Do not run this script on gpdb9.rel.dh.greenplum.com as this script will delete its shared cluster!"
-    exit 1
-fi
-
-# Make sure only RHEL6.2 machines can run our script
-cat /etc/redhat-release | grep "6." > /dev/null
-if [ $? -ne 0 ]; then
-    echo "This script must be run on a RHEL6.2 cluster"
-    exit 1
-fi
-
-# Make sure only root can run our script
-if [[ $EUID -ne 0 ]]; then
-   echo "This script must be run as root" 1>&2
-   exit 1
-fi
-
-# Make sure if using HDFS HA that there are at least 3 datanodes not including a namenode
-if [ "${ENABLE_QUORUM}" = "true" ] && [ ! $( echo ${DATANODES} | wc -w ) -ge 3 ]; then
-    echo "You must have at least three datanodes to have as the zkservers"
-    exit 1
-fi
-
-##
-## Courtesy - http://stevemorin.blogspot.com/2007/10/bash-get-self-directory-trick.html
-## Updated to run on rhel3 (kite12)
-##
-
-script_path="$(cd $(dirname $0); pwd -P)/$(basename $0)"
-
-[[ ! -f "$script_path" ]] && script_path="$(cd $(/usr/bin/dirname "$0"); pwd -P)/$(basename "$0")"
-[[ ! -f "$script_path" ]] && script_path="" && echo 'No full path to running script found!' && exit 1
-
-script_dir="${script_path%/*}"
-
-##
-## Global variables
-##
-
-DCA_SETUP=${DCA_SETUP:=false}
-DOWNLOAD_DIR=${DOWNLOAD_DIR:=~/downloads}
-ARCHIVE_URL=${ARCHIVE_URL:=http://hdsh132.lss.emc.com:8080/view/Ongoing-sprint-release-build/job/HudsonHD2_0_1_0ReleaseBuild_ongoing_sprint_release/lastSuccessfulBuild/artifact/*zip*/archive.zip}
-ARCHIVE=$( basename ${ARCHIVE_URL} )
-HADOOP_CONF_DIR=/etc/gphd/hadoop/conf
-ZK_CONF_DIR=/etc/gphd/zookeeper/conf
-NAMENODE=${NAMENODE:=smdw}
-SECONDARY_NAMENODE=${SECONDARY_NAMENODE:=mdw}
-DATANODES=${DATANODES:=sdw1 sdw2}
-RESOURCEMANAGER=${RESOURCEMANAGER:=smdw}
-NODEMANAGERS=${DATANODES:=sdw1 sdw2}
-HDFS_DATA_DIRECTORY_ROOT=${HDFS_DATA_DIRECTORY_ROOT:=/data}
-JOURNAL_DATA_ROOT=${JOURNAL_DATA_ROOT:=/data/journal}
-export HADOOP_HOME=${HADOOP_HOME:=/usr/lib/gphd/hadoop}
-export HADOOP_YARN_HOME=${HADOOP_YARN_HOME:=/usr/lib/gphd/hadoop-yarn}
-export ZK_HOME=${ZK_HOME:=/usr/lib/gphd/zookeeper}
-export ZOO_LOG_DIR=${ZOO_LOG_DIR:=/tmp/zookeeper/logs}
-export JAVA_HOME=${JAVA_HOME:=/opt/jdk1.7.0_15}
-
-## ======================================================================
-## Functions
-## ======================================================================
-
-## ----------------------------------------------------------------------
-## gphd_stop_remove()
-##   o Stop any existing namenode and datanode processes
-##   o Display running java processes with "jps" command
-##   o Remove hadoop*, bigtop* and zookeeper rpms
-##   o Remove /usr/lib/gphd
-## ----------------------------------------------------------------------
-
-display_jps(){
-    echo "======================================================================"
-    for node in ${NAMENODE} ${SECONDARY_NAMENODE} ${DATANODES/%${SECONDARY_NAMENODE}/}; do
-        echo $node
-        ssh $node "${JAVA_HOME}/bin/jps"
-        echo
-    done
-    echo "======================================================================"
-}
-
-gphd_stop_remove(){
-
-    ## Sanity Stop request for NN, 2NN, and DN only (Actual stop is when run finishes in sys_mgmt_utils.sh)
-    ssh ${NAMENODE}           "[ -f ${HADOOP_HOME}/sbin/hadoop-daemon.sh ] && sudo -u hdfs ${HADOOP_HOME}/sbin/hadoop-daemon.sh stop namenode"
-    ssh ${SECONDARY_NAMENODE} "[ -f ${HADOOP_HOME}/sbin/hadoop-daemon.sh ] && sudo -u hdfs ${HADOOP_HOME}/sbin/hadoop-daemon.sh stop secondarynamenode"
-
-    for node in ${DATANODES}; do
-	ssh $node "[ -f ${HADOOP_HOME}/sbin/hadoop-daemon.sh ] && sudo -u hdfs ${HADOOP_HOME}/sbin/hadoop-daemon.sh stop datanode"
-    done
-
-    display_jps
-
-    for node in ${NAMENODE} ${SECONDARY_NAMENODE} ${DATANODES/%${SECONDARY_NAMENODE}/}; do
-	    ssh $node "rm -rf /data*/hdfs /tmp/hadoop-*.pid /tmp/hsperfdata_*;"
-    done
-
-    # Remove hadoop, bigtop, and zookeeper rpms in each node
-    for node in ${NAMENODE} ${SECONDARY_NAMENODE} ${DATANODES/%${SECONDARY_NAMENODE}/}; do
-        ssh $node '\
-        hostname; \
-        for rpm in hadoop bigtop zookeeper; do \
-            echo "----------------------------------------------------------------------"; \
-            echo "Processing $rpm"; \
-            rpm -qa | grep $rpm; \
-            if [ $? = 0 ]; then \
-                yum remove -y $( rpm -qa | grep $rpm ); \
-                rpm -qa | grep $rpm; \
-            fi; \
-            rm -rf /etc/gphd/hadoop* /etc/gphd/zookeeper* /usr/lib/gphd/zookeeper* /var/lib/gphd/hadoop* /var/log/gphd/hadoop* /var/lib/gphd/zookeeper*; \
-            echo "----------------------------------------------------------------------"; \
-        done'
-    done
-}
-
-retrieve_hdfs_archive(){
-    echo "======================================================================"
-    
-    echo "Retrieving ${ARCHIVE_URL}"
-    
-    if [ ! -d "${DOWNLOAD_DIR}" ]; then
-        mkdir ${DOWNLOAD_DIR}
-    fi
-    
-    rm -rf ${DOWNLOAD_DIR}/*
-    
-    wget --no-verbose ${ARCHIVE_URL} -O ${DOWNLOAD_DIR}/${ARCHIVE}
-    if [ $? != 0 ]; then
-        echo "FATAL: retrieval failed (${ARCHIVE_URL})."
-        exit 2
-    fi
-    
-    echo "======================================================================"
-}
-
-## ######################################################################
-## MAIN Script
-## ######################################################################
-
-##
-## If necessary, stop hadoop services and remove previously installed
-## rpms (and files).
-##
-
-gphd_stop_remove
-
-##
-## Retrieve latest hdfs archive from jenkins server
-##
-
-retrieve_hdfs_archive
-
-##
-## Process retrieved archive
-##
-
-cd ${DOWNLOAD_DIR}
-
-if [ ! -f "${ARCHIVE}" ]; then
-    echo "FATAL: archive does note exist (${ARCHIVE})"
-    exit 2
-fi
-
-ls | grep archive.zip >> /dev/null
-if [ $? == 0 ]; then
-    unzip ${ARCHIVE}
-    if [ $? != 0 ]; then
-	echo "FATAL: unzip of archive failed (${ARCHIVE})."
-	exit 2
-    fi
-
-    cd archive/dist
-fi
-
-GPHD_TARBALL=$( ls )
-if [ ! -f "${GPHD_TARBALL}" ]; then
-    echo "FATAL: gphd tarball does not exist."
-    exit 2
-fi
-    
-GPHD_DIR=$( basename * .tar.gz )
-
-tar xf ${GPHD_TARBALL}
-if [ $? != 0 ]; then
-    echo "FATAL: tar expansion failed (${GPHD_TARBALL})."
-    exit 2
-fi
-
-##
-## Prepare file based yum repository
-##
-
-echo "======================================================================"
-
-cd ${GPHD_DIR}
-REPO_DIR=$( pwd )
-createrepo ${REPO_DIR}
-
-echo "======================================================================"
-
-rm -f /etc/yum.repos.d/gphd.repo
-
-cat > /etc/yum.repos.d/gphd.repo <<-EOF
-	[gphd]
-	name=Greenplum HD repository
-	baseurl=http://${SECONDARY_NAMENODE}/yum/base
-	enabled=1
-	gpgcheck=0
-EOF
-
-if [ ! -d /var/www/html/yum ] ; then
-    mkdir -p /var/www/html/yum
-    chmod 777 /var/www/html/yum
-fi
-rm -rf /var/www/html/yum/base
-
-rsync -auv ${REPO_DIR}/* /var/www/html/yum/base
-chmod -R 777 /var/www/html/yum/base
-cd ${DOWNLOAD_DIR}
-rm -rf ${DOWNLOAD_DIR}/*
-
-service httpd start
-
-##
-## Configure hdfs installation
-##
-
-if [ "${ENABLE_QUORUM}" = "true" ] && [ -n "${KDC}" ] ; then
-    CORE_SITE=core-site-secure-ha.xml
-    HADOOP_ENV=hadoop-env-secure.sh
-    HDFS_SITE=hdfs-site-secure-ha.xml
-elif [ "${ENABLE_QUORUM}" = "true" ] ; then
-    CORE_SITE=core-site-ha.xml
-    HADOOP_ENV=hadoop-env-ha.sh
-    HDFS_SITE=hdfs-site-ha.xml
-elif [ -n "${KDC}" ] ; then
-    CORE_SITE=core-site-secure.xml
-    HADOOP_ENV=hadoop-env-secure.sh
-    HDFS_SITE=hdfs-site-secure.xml
-else
-    CORE_SITE=core-site.xml
-    HADOOP_ENV=hadoop-env.sh
-    HDFS_SITE=hdfs-site.xml
-fi
-
-
-if [ "${ENABLE_QUORUM}" = "true" ] ; then
-    ZOO_LOG=log4j-ha.properties
-    ZOO_CFG=zoo.cfg
-
-    # Deploy zookeeper and jounalnode depend on datanodes number.
-    datanode_num=0
-
-    for node in ${DATANODES/%${SECONDARY_NAMENODE}/}; do 
-       datanode_num=`expr $datanode_num + 1` 
-    done
-
-    for num in {1..3}; do
-        declare ZKSERVER_$num=$( echo $DATANODES | cut -d" " -f ${num} )
-
-        if [ $datanode_num -ge 6 ]; then
-            num_jn=`expr $num + 3`
-        elif [ $datanode_num -gt 3 ]; then
-            num_jn=`expr $datanode_num - 3 + $num` 
-        else
-            num_jn=$num
-        fi 
-
-        declare JOURNALNODE_$num=$( echo $DATANODES | cut -d " " -f ${num_jn} )
-    done
-fi
-
-sed -e "s|%HDFS_HOST%|${NAMENODE}:9000|" --in-place=.orig ${script_dir}/templates/${CORE_SITE}
-
-if [ "${DCA_SETUP}" = true ]; then
-    sed -e "s|%JAVA_HOME%|${JAVA_HOME}|" \
-        -e "s|%NAMENODE_MEMORY%|-Xmx10240m|" \
-        -e "s|%DATANODE_MEMORY%|-Xmx8192m|" \
-        -e "s|%SECONDARYNAMENODE_MEMORY%|-Xmx10240m|" \
-        -e "s|%HADOOP_HOME%|${HADOOP_HOME}|" \
-        --in-place=.orig ${script_dir}/templates/${HADOOP_ENV}
-elif [ "${DCA_SETUP}" = "half" ]; then
-    sed -e "s|%JAVA_HOME%|${JAVA_HOME}|" \
-        -e "s|%NAMENODE_MEMORY%|-Xmx10240m|" \
-        -e "s|%DATANODE_MEMORY%|-Xmx8192m|" \
-        -e "s|%SECONDARYNAMENODE_MEMORY%|-Xmx10240m|" \
-        -e "s|%HADOOP_HOME%|${HADOOP_HOME}|" \
-        --in-place=.orig ${script_dir}/templates/${HADOOP_ENV}
-else
-    sed -e "s|%JAVA_HOME%|${JAVA_HOME}|" \
-        -e "s|%NAMENODE_MEMORY%|-Xmx4096m|" \
-        -e "s|%DATANODE_MEMORY%|-Xmx2048m|" \
-        -e "s|%SECONDARYNAMENODE_MEMORY%|-Xmx2048m|" \
-        -e "s|%HADOOP_HOME%|${HADOOP_HOME}|" \
-        --in-place=.orig ${script_dir}/templates/${HADOOP_ENV}
-fi
-
-if [ "${TWO_NAMENODE_DIR}" = true ]; then
-    sed -e "s|%DATA_DIR_ROOT%/hdfs/name<|${HDFS_DATA_DIRECTORY_ROOT}/hdfs/name,file:${HDFS_DATA_DIRECTORY_ROOT}/hdfs/name2<|" --in-place=.orig ${script_dir}/templates/${HDFS_SITE}
-fi
-
-sed -e "s|%DATA_DIR_ROOT%|${HDFS_DATA_DIRECTORY_ROOT}|" --in-place=.orig ${script_dir}/templates/${HDFS_SITE}
-
-DATA_FILESYSTEMS=$( ssh $( echo ${DATANODES} | cut -d" " -f 1 ) "df -kh 2> /dev/null | grep data | wc -l" )
-
-if [ $DATA_FILESYSTEMS -ge 2 ] ; then
-    echo "/data1 and /data2 found... using both!"
-    sed -e "s|%HDFS_DATANODE_DIR%|file:${HDFS_DATA_DIRECTORY_ROOT}1/hdfs/data,file:${HDFS_DATA_DIRECTORY_ROOT}2/hdfs/data|" --in-place=.orig ${script_dir}/templates/${HDFS_SITE}
-else
-    sed -e "s|%HDFS_DATANODE_DIR%|${HDFS_DATA_DIRECTORY_ROOT}/hdfs/data|" --in-place=.orig ${script_dir}/templates/${HDFS_SITE}
-fi
-
-if [ "${ENABLE_QUORUM}" = "true" ] ; then
-    sed -e "s|%ZKSERVER1%|${ZKSERVER_1}|" -e "s|%ZKSERVER2%|${ZKSERVER_2}|" -e "s|%ZKSERVER3%|${ZKSERVER_3}|" --in-place=.orig ${script_dir}/templates/${ZOO_CFG}
-    sed -e "s|%ZKSERVER1%|${ZKSERVER_1}|" -e "s|%ZKSERVER2%|${ZKSERVER_2}|" -e "s|%ZKSERVER3%|${ZKSERVER_3}|" --in-place=.orig ${script_dir}/templates/${CORE_SITE}
-
-    sed -e "s|%NAMENODE%|${NAMENODE}|" -e "s|%SECONDARY_NAMENODE%|${SECONDARY_NAMENODE}|" -e "s|%JOURNALNODE1%|${JOURNALNODE_1}|" -e "s|%JOURNALNODE2%|${JOURNALNODE_2}|" \
-        -e "s|%JOURNALNODE3%|${JOURNALNODE_3}|" -e "s|%JOURNAL_DATA_ROOT%|${JOURNAL_DATA_ROOT}|" --in-place=.orig ${script_dir}/templates/${HDFS_SITE}
-
-    sed -e "s|%ZOO_LOG_DIR%|${ZOO_LOG_DIR}|" --in-place=.orig ${script_dir}/templates/${ZOO_LOG}
-fi
-
-if [ "${YARN}" = "true" ] ; then
-    if [ "${DCA_SETUP}" = true ]; then
-       RESOURCE_MEM="8192" 
-    elif [ "${DCA_SETUP}" = "half" ]; then
-       RESOURCE_MEM="8192"
-    else
-       RESOURCE_MEM="2048"
-    fi
-    sed -e "s|%RESOURCE_MEM%|${RESOURCE_MEM}|" --in-place=.orig ${script_dir}/templates/yarn-site.xml
-    sed -e "s|%JAVA_HOME%|${JAVA_HOME}|" --in-place=.orig ${script_dir}/templates/yarn-env.sh
-fi
-
-rm ${script_dir}/templates/slaves
-for datanode in ${DATANODES}; do
-    echo "${datanode}" >> ${script_dir}/templates/slaves
-done
-
-rm ${script_dir}/templates/masters
-echo "${NAMENODE}" >> ${script_dir}/templates/masters
-echo "${SECONDARY_NAMENODE}" >> ${script_dir}/templates/masters
-
-##
-## Install hdfs rpms
-##
-
-for node in ${NAMENODE} ${SECONDARY_NAMENODE} ${DATANODES/%${SECONDARY_NAMENODE}/}; do
-    echo ""
-    echo "Remove old gphd directories in $node"
-    ssh $node "rm -rf /var/log/gphd/* /usr/lib/gphd/*"
-done
-
-for node in ${NAMENODE} ${SECONDARY_NAMENODE} ${DATANODES/%${SECONDARY_NAMENODE}/}; do
-    echo ""
-    echo "Passing YUM repo file to $node"
-    scp /etc/yum.repos.d/gphd.repo $node:/etc/yum.repos.d/gphd.repo
-
-    echo "Installing hdfs in $node"
-    ssh $node 'yum --disablerepo "*" --enablerepo "gphd" list available; \
-               yum install -y hadoop-conf-pseudo; \
-               yum install -y zookeeper-server;'
-
-    ##
-    ## Post rpm installation processing
-    ##
-
-    echo "cp libexec files in $node"
-    ssh $node "cp ${HADOOP_HOME}/libexec/* ${HADOOP_HOME}-hdfs/libexec"
-    ssh $node "cp /usr/lib/gphd/hadoop/libexec/* /usr/lib/gphd/hadoop-yarn/libexec/"
-
-    echo "scp slaves, masters, and core-site.xml file to $node"
-    scp ${script_dir}/templates/slaves $node:${HADOOP_CONF_DIR}/slaves
-    scp ${script_dir}/templates/masters $node:${HADOOP_CONF_DIR}/masters
-    scp ${script_dir}/templates/${CORE_SITE} $node:${HADOOP_CONF_DIR}/core-site.xml
-    scp ${script_dir}/templates/${HADOOP_ENV} $node:${HADOOP_CONF_DIR}/hadoop-env.sh
-
-    if [ "${YARN}" = "true" ] ; then
-        scp ${script_dir}/templates/yarn-env.sh $node:${HADOOP_CONF_DIR}/yarn-env.sh
-        scp ${script_dir}/templates/yarn-site.xml $node:${HADOOP_CONF_DIR}/yarn-site.xml
-
-        ssh $node mkdir ${HADOOP_YARN_HOME}/logs
-        ssh $node chmod 777 ${HADOOP_YARN_HOME}/logs
-        ssh $node chmod 777 /var/log/gphd/hadoop-yarn
-    fi
-
-    if [ "${ENABLE_QUORUM}" = "true" ] ; then
-	ssh $node "mkdir /usr/lib/gphd/zookeeper/etc; \
-                   ln -s /etc/gphd/zookeeper/conf /usr/lib/gphd/zookeeper/etc/zookeeper; \
-                   echo ZOO_LOG_DIR=/tmp/zookeeper/logs/ >> ${ZK_HOME}/bin/zkEnv.sh"
-	scp ${script_dir}/templates/${ZOO_CFG} $node:${ZK_CONF_DIR}/zoo.cfg
-	scp ${script_dir}/templates/${ZOO_LOG} $node:${ZK_CONF_DIR}/log4j.properties
-
-	if [ $node == $ZKSERVER_1 ] ; then
-	    ZK_NUM=1
-	elif [ $node == $ZKSERVER_2 ] ; then
-	    ZK_NUM=2
-	elif [ $node == $ZKSERVER_3 ] ; then
-	    ZK_NUM=3
-	fi
-
-	if [ $node == $ZKSERVER_1 ] || [ $node == $ZKSERVER_2 ] || [ $node == $ZKSERVER_3 ] ; then
-	    ssh $node "mkdir -p /tmp/zookeeper/logs && chown -R zookeeper.hadoop /tmp/zookeeper/ && echo ${ZK_NUM} > /tmp/zookeeper/myid; \
-                       grep ZK_HOME ~/.bash_profile >> /dev/null; \
-                       if [ $? != 0 ] ; then \
-                           echo 'export ZK_HOME=/usr/lib/gphd/zookeeper' >> ~/.bash_profile; \
-                           echo 'export ZOO_LOG_DIR=/tmp/zookeeper/logs' >> ~/.bash_profile; \
-                       fi"
-	fi
-    fi
-
-    scp ${script_dir}/templates/${HDFS_SITE} $node:${HADOOP_CONF_DIR}/hdfs-site.xml
-
-    ssh $node mkdir ${HADOOP_HOME}/logs
-    ssh $node chmod 777 ${HADOOP_HOME}/logs
-    echo ""
-done
-
-if [ "${YARN}" = "true" ] ; then
-    for node in ${RESOURCEMANAGER} ${NODEMANAGERS}; do
-        ssh ${node} cp /etc/gphd/hadoop/conf.empty/capacity-scheduler.xml /usr/lib/gphd/hadoop/etc/hadoop/
-    done
-fi
-
-if [ "${ENABLE_QUORUM}" = "true" ] ; then
-    if [ -n "${KDC}" ] ; then
-        HDFS_DN_SSH_COMMAND=""
-    else
-        HDFS_DN_SSH_COMMAND="sudo -u hdfs"
-    fi
-
-    for zkserver in ${ZKSERVER_1} ${ZKSERVER_2} ${ZKSERVER_3}; do
-	ssh $zkserver "sudo JAVA_HOME=${JAVA_HOME} /etc/init.d/zookeeper-server start"
-    done
-
-    for journalnode in ${JOURNALNODE_1} ${JOURNALNODE_2} ${JOURNALNODE_3}; do
-	ssh $journalnode "rm -rf ${JOURNAL_DATA_ROOT}; mkdir ${JOURNAL_DATA_ROOT}; chown hdfs.hdfs ${JOURNAL_DATA_ROOT};\
-                          JAVA_HOME=${JAVA_HOME} ${HDFS_DN_SSH_COMMAND} ${HADOOP_HOME}/sbin/hadoop-daemon.sh start journalnode"
-    done
-
-    ssh ${NAMENODE} "\
-      JAVA_HOME=${JAVA_HOME} sudo -u hdfs ${HADOOP_HOME}-hdfs/bin/hdfs namenode -format -force 2>&1 | tee ${HADOOP_HOME}/logs/hdfs-namenode-format.out; \
-      grep 'Exiting with status 0' ${HADOOP_HOME}/logs/hdfs-namenode-format.out; \
-      if [ \$? != 0 ]; then \
-        echo 'FATAL: format failed.'; \
-        exit 1; \
-      fi;
-      sudo -u hdfs ${HADOOP_HOME}/sbin/hadoop-daemon.sh start namenode"
-
-    ssh ${SECONDARY_NAMENODE} "\
-      JAVA_HOME=${JAVA_HOME} sudo -u hdfs ${HADOOP_HOME}-hdfs/bin/hdfs namenode -bootstrapStandby 2>&1 | tee ${HADOOP_HOME}/logs/hdfs-namenode-bootstrapStandby.out; \
-      grep 'Exiting with status 0' ${HADOOP_HOME}/logs/hdfs-namenode-bootstrapStandby.out; \
-      if [ \$? != 0 ]; then \
-        echo 'FATAL: bootstrapStandby failed.'; \
-        exit 1; \
-      fi;
-      sudo -u hdfs ${HADOOP_HOME}/sbin/hadoop-daemon.sh start namenode"
-
-    ssh ${NAMENODE} "JAVA_HOME=${JAVA_HOME} sudo -u hdfs ${HADOOP_HOME}-hdfs/bin/hdfs zkfc -formatZK -force"
-    for namenode in ${NAMENODE} ${SECONDARY_NAMENODE}; do
-	ssh $namenode "JAVA_HOME=${JAVA_HOME} sudo -u hdfs ${HADOOP_HOME}/sbin/hadoop-daemon.sh start zkfc"
-    done
-
-    for datanode in ${DATANODES}; do
-	    ssh $datanode ${HDFS_DN_SSH_COMMAND} /usr/lib/gphd/hadoop/sbin/hadoop-daemon.sh start datanode
-    done
-else
-    ##
-    ## Format hdfs file system
-    ##
-
-    ssh ${NAMENODE} "\
-      sudo -u hdfs ${HADOOP_HOME}-hdfs/bin/hdfs namenode -format -force 2>&1 | tee ${HADOOP_HOME}/logs/hdfs-namenode-format.out; \
-      grep 'Exiting with status 0' ${HADOOP_HOME}/logs/hdfs-namenode-format.out; \
-      if [ \$? != 0 ]; then \
-        echo 'FATAL: format failed.'; \
-        exit 1; \
-      fi"
-
-    ##
-    ## Startup hadoop namenode and datanode services
-    ##
-
-    ssh ${NAMENODE}           sudo -u hdfs /usr/lib/gphd/hadoop/sbin/hadoop-daemon.sh start namenode
-
-    ssh ${SECONDARY_NAMENODE} sudo -u hdfs /usr/lib/gphd/hadoop/sbin/hadoop-daemon.sh start secondarynamenode
-
-    for datanode in ${DATANODES}; do
-	if [ -n "${KDC}" ] ; then
-	    ssh $datanode     /usr/lib/gphd/hadoop/sbin/hadoop-daemon.sh start datanode
-	else
-	    ssh $datanode     sudo -u hdfs /usr/lib/gphd/hadoop/sbin/hadoop-daemon.sh start datanode
-	fi
-    done
-
-    if [ "${YARN}" = "true" ] ; then
-        ssh ${PHD_USER}@${RESOURCEMANAGER} JAVA_HOME=${JAVA_HOME} /usr/lib/gphd/hadoop-yarn/sbin/yarn-daemon.sh start resourcemanager 
-        for node in ${NODEMANAGERS}; do
-            ssh ${PHD_USER}@${node} JAVA_HOME=${JAVA_HOME} /usr/lib/gphd/hadoop-yarn/sbin/yarn-daemon.sh start nodemanager 
-        done
-    fi
-fi
-
-##
-## Before exiting, display running java processes with "jps" command
-##
-
-display_jps
-
-exit 0

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/10909db4/depends/libyarn/releng/bin/ha_failover.sh
----------------------------------------------------------------------
diff --git a/depends/libyarn/releng/bin/ha_failover.sh b/depends/libyarn/releng/bin/ha_failover.sh
deleted file mode 100644
index f596041..0000000
--- a/depends/libyarn/releng/bin/ha_failover.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-nn1_status=`/usr/bin/hdfs haadmin -getServiceState nn1`
-echo nn1 status is: $nn1_status
-nn2_status=`/usr/bin/hdfs haadmin -getServiceState nn2`
-echo nn2 status is: $nn2_status
-if [ "${nn1_status}" = "active" ]; then
-    echo "hdfs haadmin -failover nn1 nn2"
-    hdfs haadmin -failover nn1 nn2
-elif [ "${nn1_status}" = "standby" ]; then
-    echo "hdfs haadmin -failover nn2 nn1"
-    hdfs haadmin -failover nn2 nn1
-else
-    echo "Can't get valid status of nn1, exit."
-    exit 1
-fi
-nn1_status=`/usr/bin/hdfs haadmin -getServiceState nn1`
-echo nn1 status now is: $nn1_status
-nn2_status=`/usr/bin/hdfs haadmin -getServiceState nn2`
-echo nn2 status now is: $nn2_status

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/10909db4/depends/libyarn/releng/bin/install-phd.sh
----------------------------------------------------------------------
diff --git a/depends/libyarn/releng/bin/install-phd.sh b/depends/libyarn/releng/bin/install-phd.sh
deleted file mode 100644
index 4d63191..0000000
--- a/depends/libyarn/releng/bin/install-phd.sh
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/bin/sh
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-if [ -z ${DATA_PATH} ]; then
-	echo "DATA_PATH not set"
-	exit 1 
-fi
-
-if [ -d ${DATA_PATH} ]; then
-	cd ${DATA_PATH}
-else
-	echo "DATA_PATH not exist"
-        exit 1
-fi
-
-HADOOP_TARBALL=`curl --silent --no-buffer "http://hdp4-mdw1.wbe.dh.greenplum.com/dist/PHD/latest/?C=M;O=D" | grep -o 'PHD-2.[0-9].[0-9].[0-9]-bin-[0-9]*.tar.gz' | head -n 1`
-echo "HADOOP tarball: " ${HADOOP_TARBALL}
-
-HADOOP_URL="http://hdp4-mdw1.wbe.dh.greenplum.com/dist/PHD/latest/"${HADOOP_TARBALL}
-echo "Download HADOOP from " ${HADOOP_URL}
-
-curl --silent -o ${DATA_PATH}/${HADOOP_TARBALL} -L ${HADOOP_URL}
-
-tar -xzf ${DATA_PATH}/${HADOOP_TARBALL}
-
-HADOOP_PACKAGE=`echo ${HADOOP_TARBALL} | grep -o 'PHD-[0-9].[0-9].[0-9].[0-9]-bin-[0-9]*'`
-HADOOP_VERSION=`ls ${DATA_PATH}/${HADOOP_PACKAGE}/hadoop/tar/*.tar.gz | grep -o 'hadoop-[0-9].[0-9].[0-9]-[A-Za-z0-9\-]*-[0-9].[0-9].[0-9].[0-9]'`
-echo "HADOOP version: " ${HADOOP_VERSION}
-
-if [ -z ${HADOOP_VERSION} ]; then
-	echo "cannot get HADOOP version"
-	exit 1
-fi
-
-tar -xzf ${HADOOP_PACKAGE}/hadoop/tar/${HADOOP_VERSION}.tar.gz
-
-if [ -z ${HDFS_CONFIG_PATH} ]; then
-        echo "HDFS_CONFIG_PATH not set"
-        exit 1
-fi
-
-if [ -f ${HDFS_CONFIG_PATH} ]; then
-	cp -f ${HDFS_CONFIG_PATH} ${DATA_PATH}/${HADOOP_VERSION}/etc/hadoop/
-else
-	echo "HDFS_CONFIG_PATH not a file"
-	exit 1
-fi
-
-HADOOP_BIN=${DATA_PATH}/${HADOOP_VERSION}/bin
-HADOOP_SBIN=${DATA_PATH}/${HADOOP_VERSION}/sbin
-
-${HADOOP_BIN}/hdfs namenode -format
-${HADOOP_SBIN}/hadoop-daemon.sh start namenode
-${HADOOP_SBIN}/hadoop-daemon.sh start datanode
-${HADOOP_BIN}/hdfs dfs -mkdir hdfs://localhost:9000/user
-${HADOOP_BIN}/hdfs dfs -chmod 777 hdfs://localhost:9000/user

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/10909db4/depends/libyarn/releng/bin/setup_gphdinst.sh
----------------------------------------------------------------------
diff --git a/depends/libyarn/releng/bin/setup_gphdinst.sh b/depends/libyarn/releng/bin/setup_gphdinst.sh
deleted file mode 100644
index 64a82c1..0000000
--- a/depends/libyarn/releng/bin/setup_gphdinst.sh
+++ /dev/null
@@ -1,175 +0,0 @@
-#!/bin/bash
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-# Usage: ./setup_gphdinst.sh setup / start HK / stop HK 
-# HK means this is for HA/Kerberos
-#
-
-JAVA_HOME=${JAVA_HOME:="/opt/jdk1.7.0_15"}
-HADOOP_HOME="/usr/lib/gphd/hadoop"
-LOCAL_HDFS_RPMINSTALL="true"
-DCA_SETUP=${DCA_SETUP:=false}
-TWO_NAMENODE_DIR=${TWO_NAMENODE_DIR:=false}
-SEGMENT_SYSTEM_LIST=${SEGMENT_SYSTEM_LIST:="sdw1 sdw2 sdw3"}
-MASTERHOST=${MASTERHOST:=mdw}
-STANDBYHOST=${STANDBYHOST:=smdw}
-DATANODES_LIST=${SEGMENT_SYSTEM_LIST}
-USER="gpadmin"
-STEP="$1"
-ENABLE_QUORUM=${ENABLE_QUORUM:=true}
-
-if [ "${ENABLE_QUORUM}" = "true" ]; then 
-    ZKSERVER_LIST=$( echo ${SEGMENT_SYSTEM_LIST} | cut -d' ' -f1-3 )
-
-    # Deploy zookeeper and jounalnode depend on datanodes number.
-    datanode_num=0
-
-    for node in ${SEGMENT_SYSTEM_LIST}; do   
-       datanode_num=`expr $datanode_num + 1` 
-    done 
-
-    if [ $datanode_num -ge 6 ]; then 
-        JOURNALNODE_LIST=$( echo ${SEGMENT_SYSTEM_LIST} | cut -d' ' -f4-6 )
-    elif [ $datanode_num -eq 5 ]; then 
-        JOURNALNODE_LIST=$( echo ${SEGMENT_SYSTEM_LIST} | cut -d' ' -f3-5 )
-    elif [ $datanode_num -eq 4 ]; then 
-        JOURNALNODE_LIST=$( echo ${SEGMENT_SYSTEM_LIST} | cut -d' ' -f2-4 )
-    else 
-        JOURNALNODE_LIST=$( echo ${SEGMENT_SYSTEM_LIST} | cut -d' ' -f1-3 )
-    fi
-    echo "ZKSERVER_LIST is ${ZKSERVER_LIST}"
-    echo "JOURNALNODE_LIST is ${JOURNALNODE_LIST}"
-fi
-
-if [ "${STEP}" = "setup" ]; then
-    PHD_PACKAGE=$( curl --silent --no-buffer "http://hdp4-mdw1.wbe.dh.greenplum.com/dist/PHD/testing/?C=N;O=D" | grep -o 'PHD-[0-9].[0-9].[0-9].[0-9]-[0-9]*.tar.gz' | head -n 1 )
-    HDFS_DOWNLOAD_URL="http://hdp4-mdw1.wbe.dh.greenplum.com/dist/PHD/testing/${PHD_PACKAGE}"
-
-    echo "Hadoop Package Download URL: ${HDFS_DOWNLOAD_URL}"
-    echo "Setting up latest HDFS..."
-
-    sudo DATANODES="${DATANODES_LIST}" \
-         NAMENODE=${STANDBYHOST} \
-         SECONDARY_NAMENODE=${MASTERHOST} \
-         RESOURCEMANAGER=${STANDBYHOST} \
-         NODEMANAGERS="${DATANODES_LIST}" \
-         ARCHIVE_URL=${HDFS_DOWNLOAD_URL} \
-         JAVA_HOME=${JAVA_HOME} \
-         DCA_SETUP=${DCA_SETUP} \
-         TWO_NAMENODE_DIR=${TWO_NAMENODE_DIR} \
-         KDC=${KDC} \
-         ENABLE_QUORUM=${ENABLE_QUORUM} \
-         YARN=${YARN} \
-         PHD_USER=${USER} \
-         bash gphdinst.sh
-fi
-
-if [ -n "${KDC}" ] ; then
-    HDFS_DN_SSH_COMMAND="sudo"
-elif [ "$2" = "HK" ]; then
-    HDFS_DN_SSH_COMMAND="sudo"
-else
-    HDFS_DN_SSH_COMMAND="sudo -u hdfs"
-fi
-
-if [ "${STEP}" = "stop" ]; then
-    if [ -f "${HADOOP_HOME}/bin/stop-all.sh" ]; then 
-        ${HADOOP_HOME}/bin/stop-all.sh
-    fi   
-
-    if [ "${LOCAL_HDFS_RPMINSTALL}" = "true" ]; then
-        if [ "${USE_MASTER_AS_DATANODE}" = "true" ]; then
-            DATANODES_LIST="${SEGMENT_SYSTEM_LIST} ${MASTERHOST}"
-        else
-            DATANODES_LIST=${SEGMENT_SYSTEM_LIST}
-        fi
-
-        if [ "${ENABLE_QUORUM}" = "true" ]; then
-            echo "Stopping local HDFS HA cluster..."
-            for datanode in ${DATANODES_LIST}; do
-                ssh $datanode ${HDFS_DN_SSH_COMMAND} /usr/lib/gphd/hadoop/sbin/hadoop-daemon.sh stop datanode
-            done
-
-            for node in ${JOURNALNODE_LIST}; do
-                ssh $node ${HDFS_DN_SSH_COMMAND} /usr/lib/gphd/hadoop/sbin/hadoop-daemon.sh stop journalnode
-            done
-
-            for node in ${ZKSERVER_LIST}; do
-                ssh $node sudo JAVA_HOME=${JAVA_HOME} /etc/init.d/zookeeper-server stop
-            done
-
-            sudo -u hdfs /usr/lib/gphd/hadoop/sbin/hadoop-daemon.sh stop zkfc
-            ssh ${STANDBYHOST} sudo -u hdfs /usr/lib/gphd/hadoop/sbin/hadoop-daemon.sh stop zkfc
-            sudo -u hdfs /usr/lib/gphd/hadoop/sbin/hadoop-daemon.sh stop namenode
-            ssh ${STANDBYHOST} sudo -u hdfs /usr/lib/gphd/hadoop/sbin/hadoop-daemon.sh stop namenode
-        else
-            echo "Stopping local HDFS cluster..."
-
-            sudo -u hdfs /usr/lib/gphd/hadoop/sbin/hadoop-daemon.sh stop secondarynamenode
-
-            for datanode in ${DATANODES_LIST}; do
-                ssh $datanode ${HDFS_DN_SSH_COMMAND} /usr/lib/gphd/hadoop/sbin/hadoop-daemon.sh stop datanode
-                if [ "${YARN}" = "true" ] ; then
-                    ssh $datanode "/usr/lib/gphd/hadoop-yarn/sbin/yarn-daemon.sh stop nodemanager"
-                fi
-            done
-
-            ssh ${STANDBYHOST} sudo -u hdfs /usr/lib/gphd/hadoop/sbin/hadoop-daemon.sh stop namenode
-            if [ "${YARN}" = "true" ] ; then
-                ssh ${STANDBYHOST} /usr/lib/gphd/hadoop-yarn/sbin/yarn-daemon.sh stop resourcemanager
-            fi
-         fi
-    fi
-fi
-
-if [ "${STEP}" = "start" ]; then
-
-                                if [ "${ENABLE_QUORUM}" = "true" ]; then
-                                    echo "Starting up local HDFS HA"
-
-                                    for node in ${JOURNALNODE_LIST}; do
-                                        ssh $node ${HDFS_DN_SSH_COMMAND} /usr/lib/gphd/hadoop/sbin/hadoop-daemon.sh start journalnode
-                                    done
-
-                                    for node in ${ZKSERVER_LIST}; do
-                                        ssh $node sudo JAVA_HOME=${JAVA_HOME} /etc/init.d/zookeeper-server start
-                                    done
-                    
-                                    ssh ${STANDBYHOST} sudo -u hdfs /usr/lib/gphd/hadoop/sbin/hadoop-daemon.sh start zkfc
-                                    sudo -u hdfs /usr/lib/gphd/hadoop/sbin/hadoop-daemon.sh start zkfc
-                                    ssh ${STANDBYHOST} sudo -u hdfs /usr/lib/gphd/hadoop/sbin/hadoop-daemon.sh start namenode
-                                    sudo -u hdfs /usr/lib/gphd/hadoop/sbin/hadoop-daemon.sh start namenode
-
-                                    for datanode in ${DATANODES_LIST}; do
-                                        ssh $datanode ${HDFS_DN_SSH_COMMAND} /usr/lib/gphd/hadoop/sbin/hadoop-daemon.sh start datanode
-                                    done
-                                else
-                                    echo "Starting up local HDFS"
-                                    ssh ${STANDBYHOST} sudo -u hdfs /usr/lib/gphd/hadoop/sbin/hadoop-daemon.sh start namenode
-                                    if [ "${YARN}" = "true" ]; then
-                                        ssh ${STANDBYHOST} /usr/lib/gphd/hadoop-yarn/sbin/yarn-daemon.sh start resourcemanager
-                                    fi
-                                    for datanode in ${DATANODES_LIST}; do
-                                        ssh $datanode ${HDFS_DN_SSH_COMMAND} /usr/lib/gphd/hadoop/sbin/hadoop-daemon.sh start datanode
-                                        if [ "${YARN}" = "true" ]; then
-                                            ssh $datanode /usr/lib/gphd/hadoop-yarn/sbin/yarn-daemon.sh start nodemanager
-                                        fi
-                                    done
-                                    sudo -u hdfs /usr/lib/gphd/hadoop/sbin/hadoop-daemon.sh start secondarynamenode
-                                fi
-fi

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/10909db4/depends/libyarn/releng/bin/templates/.core-site-secure-ha.xml.swp
----------------------------------------------------------------------
diff --git a/depends/libyarn/releng/bin/templates/.core-site-secure-ha.xml.swp b/depends/libyarn/releng/bin/templates/.core-site-secure-ha.xml.swp
deleted file mode 100644
index 43e78f7..0000000
Binary files a/depends/libyarn/releng/bin/templates/.core-site-secure-ha.xml.swp and /dev/null differ

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/10909db4/depends/libyarn/releng/bin/templates/core-site-ha.xml
----------------------------------------------------------------------
diff --git a/depends/libyarn/releng/bin/templates/core-site-ha.xml b/depends/libyarn/releng/bin/templates/core-site-ha.xml
deleted file mode 100644
index ef31461..0000000
--- a/depends/libyarn/releng/bin/templates/core-site-ha.xml
+++ /dev/null
@@ -1,28 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-    <property>
-        <name>fs.defaultFS</name>
-        <value>hdfs://gphd-cluster</value>
-    </property>
-    <property>
-        <name>ha.zookeeper.quorum</name>
-        <value>%ZKSERVER1%:2181,%ZKSERVER2%:2181,%ZKSERVER3%:2181</value>
-    </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/10909db4/depends/libyarn/releng/bin/templates/core-site-secure-ha.xml
----------------------------------------------------------------------
diff --git a/depends/libyarn/releng/bin/templates/core-site-secure-ha.xml b/depends/libyarn/releng/bin/templates/core-site-secure-ha.xml
deleted file mode 100755
index cf96ce1..0000000
--- a/depends/libyarn/releng/bin/templates/core-site-secure-ha.xml
+++ /dev/null
@@ -1,39 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-    <property>
-        <name>hadoop.security.authentication</name>
-        <value>kerberos</value>
-    </property>
-    
-    <property>
-        <name>hadoop.security.authorization</name>
-        <value>true</value>
-    </property>
-
-    <property>
-        <name>fs.defaultFS</name>
-        <value>hdfs://gphd-cluster</value>
-    </property>
-
-    <property>
-        <name>ha.zookeeper.quorum</name>
-        <value>%ZKSERVER1%:2181,%ZKSERVER2%:2181,%ZKSERVER3%:2181</value>
-    </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/10909db4/depends/libyarn/releng/bin/templates/core-site-secure.xml
----------------------------------------------------------------------
diff --git a/depends/libyarn/releng/bin/templates/core-site-secure.xml b/depends/libyarn/releng/bin/templates/core-site-secure.xml
deleted file mode 100755
index 1424ebb..0000000
--- a/depends/libyarn/releng/bin/templates/core-site-secure.xml
+++ /dev/null
@@ -1,34 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-    <property>
-        <name>fs.default.name</name>
-        <value>hdfs://%HDFS_HOST%</value>
-    </property>
-
-    <property>
-        <name>hadoop.security.authentication</name>
-        <value>kerberos</value>
-    </property>
-    
-    <property>
-        <name>hadoop.security.authorization</name>
-        <value>true</value>
-    </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/10909db4/depends/libyarn/releng/bin/templates/core-site.xml
----------------------------------------------------------------------
diff --git a/depends/libyarn/releng/bin/templates/core-site.xml b/depends/libyarn/releng/bin/templates/core-site.xml
deleted file mode 100644
index 120e1d4..0000000
--- a/depends/libyarn/releng/bin/templates/core-site.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-    <property>
-        <name>fs.default.name</name>
-        <value>hdfs://%HDFS_HOST%</value>
-    </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/10909db4/depends/libyarn/releng/bin/templates/download-config
----------------------------------------------------------------------
diff --git a/depends/libyarn/releng/bin/templates/download-config b/depends/libyarn/releng/bin/templates/download-config
deleted file mode 100644
index e517b49..0000000
--- a/depends/libyarn/releng/bin/templates/download-config
+++ /dev/null
@@ -1,5 +0,0 @@
-hawq-1.1.0=http://hdp4-mdw1.wbe.dh.greenplum.com/dist/PHD/stable/PHD1.0.1/PHD-1.0.1.0-19.tar.gz
-hawq-1.1.1=http://hdp4-mdw1.wbe.dh.greenplum.com/dist/PHD/stable/PHD1.0.2/PHD-1.0.2.0-7.tar.gz
-hawq-1.1.2=http://hdp4-mdw1.wbe.dh.greenplum.com/dist/PHD/stable/PHD1.0.3/PHD-1.0.3.0-66.tar.gz
-hawq-1.1.3=http://hdp4-mdw1.wbe.dh.greenplum.com/dist/PHD/stable/PHD1.1.0/PHD-1.1.0.0-76.tar.gz
-hawq-1.1.4=http://hdp4-mdw1.wbe.dh.greenplum.com/dist/PHD/testing/PHD-1.1.1.0-82.tar.gz

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/10909db4/depends/libyarn/releng/bin/templates/hadoop-env-ha.sh
----------------------------------------------------------------------
diff --git a/depends/libyarn/releng/bin/templates/hadoop-env-ha.sh b/depends/libyarn/releng/bin/templates/hadoop-env-ha.sh
deleted file mode 100644
index ede6744..0000000
--- a/depends/libyarn/releng/bin/templates/hadoop-env-ha.sh
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright 2011 The Apache Software Foundation
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Set Hadoop-specific environment variables here.
-
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
-
-# The java implementation to use.
-export JAVA_HOME=%JAVA_HOME%
-export HADOOP_HOME=%HADOOP_HOME%
-export PATH=$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
-export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
-
-# Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
-for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
-  if [ "$HADOOP_CLASSPATH" ]; then
-    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
-  else
-    export HADOOP_CLASSPATH=$f
-  fi
-done
-
-# Extra Java runtime options.  Empty by default.
-export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
-
-# Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} %NAMENODE_MEMORY% $HADOOP_NAMENODE_OPTS"
-export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS %DATANODE_MEMORY% -Xss256k $HADOOP_DATANODE_OPTS"
-
-export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} %SECONDARYNAMENODE_MEMORY% $HADOOP_SECONDARYNAMENODE_OPTS"
-
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-export HADOOP_CLIENT_OPTS="-Xmx128m $HADOOP_CLIENT_OPTS"
-
-# On secure datanodes, user to run the datanode as after dropping privileges
-export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
-
-# Where log files are stored.  $HADOOP_HOME/logs by default.
-#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
-
-# Where log files are stored in the secure data environment.
-export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
-
-# The directory where pid files are stored. /tmp by default.
-# NOTE: this should be set to a directory that can only be written to by
-#       the user that will run the hadoop daemons.  Otherwise there is the
-#       potential for a symlink attack.
-export HADOOP_PID_DIR=${HADOOP_PID_DIR}
-export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
-
-# A string representing this instance of hadoop. $USER by default.
-export HADOOP_IDENT_STRING=$USER
-
-umask 022

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/10909db4/depends/libyarn/releng/bin/templates/hadoop-env-secure.sh
----------------------------------------------------------------------
diff --git a/depends/libyarn/releng/bin/templates/hadoop-env-secure.sh b/depends/libyarn/releng/bin/templates/hadoop-env-secure.sh
deleted file mode 100755
index 03f9900..0000000
--- a/depends/libyarn/releng/bin/templates/hadoop-env-secure.sh
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright 2011 The Apache Software Foundation
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Set Hadoop-specific environment variables here.
-
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
-
-# The java implementation to use.
-export JAVA_HOME=%JAVA_HOME%
-export HADOOP_HOME=%HADOOP_HOME%
-export PATH=$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
-export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
-
-# Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
-for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
-  if [ "$HADOOP_CLASSPATH" ]; then
-    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
-  else
-    export HADOOP_CLASSPATH=$f
-  fi
-done
-
-# Extra Java runtime options.  Empty by default.
-export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
-
-# Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} %NAMENODE_MEMORY% $HADOOP_NAMENODE_OPTS"
-export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS %DATANODE_MEMORY% -Xss256k $HADOOP_DATANODE_OPTS"
-
-export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} %SECONDARYNAMENODE_MEMORY% $HADOOP_SECONDARYNAMENODE_OPTS"
-
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-export HADOOP_CLIENT_OPTS="-Xmx128m $HADOOP_CLIENT_OPTS"
-
-# On secure datanodes, user to run the datanode as after dropping privileges
-export HADOOP_SECURE_DN_USER=hdfs
-export JSVC_HOME=/usr/bin
-
-# Where log files are stored.  $HADOOP_HOME/logs by default.
-#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
-
-# Where log files are stored in the secure data environment.
-export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
-
-# The directory where pid files are stored. /tmp by default.
-# NOTE: this should be set to a directory that can only be written to by
-#       the user that will run the hadoop daemons.  Otherwise there is the
-#       potential for a symlink attack.
-export HADOOP_PID_DIR=${HADOOP_PID_DIR}
-export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
-
-# A string representing this instance of hadoop. $USER by default.
-export HADOOP_IDENT_STRING=$USER
-
-umask 022

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/10909db4/depends/libyarn/releng/bin/templates/hadoop-env.sh
----------------------------------------------------------------------
diff --git a/depends/libyarn/releng/bin/templates/hadoop-env.sh b/depends/libyarn/releng/bin/templates/hadoop-env.sh
deleted file mode 100644
index ede6744..0000000
--- a/depends/libyarn/releng/bin/templates/hadoop-env.sh
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright 2011 The Apache Software Foundation
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Set Hadoop-specific environment variables here.
-
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
-
-# The java implementation to use.
-export JAVA_HOME=%JAVA_HOME%
-export HADOOP_HOME=%HADOOP_HOME%
-export PATH=$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
-export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
-
-# Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
-for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
-  if [ "$HADOOP_CLASSPATH" ]; then
-    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
-  else
-    export HADOOP_CLASSPATH=$f
-  fi
-done
-
-# Extra Java runtime options.  Empty by default.
-export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
-
-# Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} %NAMENODE_MEMORY% $HADOOP_NAMENODE_OPTS"
-export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS %DATANODE_MEMORY% -Xss256k $HADOOP_DATANODE_OPTS"
-
-export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} %SECONDARYNAMENODE_MEMORY% $HADOOP_SECONDARYNAMENODE_OPTS"
-
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-export HADOOP_CLIENT_OPTS="-Xmx128m $HADOOP_CLIENT_OPTS"
-
-# On secure datanodes, user to run the datanode as after dropping privileges
-export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
-
-# Where log files are stored.  $HADOOP_HOME/logs by default.
-#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
-
-# Where log files are stored in the secure data environment.
-export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
-
-# The directory where pid files are stored. /tmp by default.
-# NOTE: this should be set to a directory that can only be written to by
-#       the user that will run the hadoop daemons.  Otherwise there is the
-#       potential for a symlink attack.
-export HADOOP_PID_DIR=${HADOOP_PID_DIR}
-export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
-
-# A string representing this instance of hadoop. $USER by default.
-export HADOOP_IDENT_STRING=$USER
-
-umask 022

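The three hadoop-env templates removed here (plain, -ha, -secure) are otherwise the same script; the plain and HA copies share one blob (index ede6744), and the secure variant differs only in pinning HADOOP_SECURE_DN_USER=hdfs and adding JSVC_HOME=/usr/bin for the privileged datanode, where the others leave HADOOP_SECURE_DN_USER to the environment. If the files were still on disk, a plain diff would confirm this; a sketch, not captured output:

    # Compare the plain and secure variants of the removed templates.
    diff hadoop-env.sh hadoop-env-secure.sh
    # Only the HADOOP_SECURE_DN_USER and JSVC_HOME lines are expected to differ.
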
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/10909db4/depends/libyarn/releng/bin/templates/hdfs-site-datanode-secure-ha.xml
----------------------------------------------------------------------
diff --git a/depends/libyarn/releng/bin/templates/hdfs-site-datanode-secure-ha.xml b/depends/libyarn/releng/bin/templates/hdfs-site-datanode-secure-ha.xml
deleted file mode 100755
index 966234d..0000000
--- a/depends/libyarn/releng/bin/templates/hdfs-site-datanode-secure-ha.xml
+++ /dev/null
@@ -1,240 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-    <property>
-        <name>dfs.encrypt.data.transfer</name>
-        <value>false</value>
-    </property>
-    <property>
-        <name>dfs.encrypt.data.transfer.algorithm</name>
-        <value>rc4</value>
-        <description>may be "rc4" or "3des" - 3des has a significant performance impact</description>
-    </property>
-    <property>
-        <name>dfs.datanode.keytab.file</name>
-        <value>/etc/gphd/hawq-krb5.keytab</value>
-    </property>
-    <property>
-        <name>dfs.datanode.kerberos.principal</name>
-        <value>hdfs/_HOST@HAWQ.PIVOTAL.COM</value>
-    </property>
-    <property>
-      <name>dfs.datanode.kerberos.http.principal</name>
-      <value>HTTP/_HOST@HAWQ.PIVOTAL.COM</value>
-    </property>
-    <property>
-      <name>dfs.datanode.address</name>
-      <value>0.0.0.0:1004</value>
-    </property>
-    <property>
-      <name>dfs.datanode.http.address</name>
-      <value>0.0.0.0:1006</value>
-    </property>
-    <property>
-        <name>dfs.datanode.data.dir.perm</name>
-        <value>755</value>
-    </property>
-    <property>
-        <name>dfs.journalnode.keytab.file</name>
-        <value>/etc/gphd/hawq-krb5.keytab</value>
-    </property>
-    <property>
-        <name>dfs.journalnode.kerberos.principal</name>
-        <value>hdfs/_HOST@HAWQ.PIVOTAL.COM</value>
-    </property>
-    <property>
-      <name>dfs.journalnode.kerberos.internal.spnego.principal</name>
-      <value>HTTP/_HOST@HAWQ.PIVOTAL.COM</value>
-    </property>
-    <property>
-        <name>dfs.namenode.keytab.file</name>
-        <value>/etc/gphd/hawq-krb5.keytab</value>
-    </property>
-    <property>
-        <name>dfs.namenode.kerberos.principal</name>
-        <value>hdfs/_HOST@HAWQ.PIVOTAL.COM</value>
-    </property>
-    <property>
-      <name>dfs.namenode.kerberos.http.principal</name>
-      <value>HTTP/_HOST@HAWQ.PIVOTAL.COM</value>
-    </property>
-    <property>
-      <name>dfs.namenode.kerberos.internal.spnego.principal</name>
-      <value>HTTP/_HOST@HAWQ.PIVOTAL.COM</value>
-    </property>
-
-    <property>
-      <name>dfs.block.access.token.enable</name>
-      <value>true</value>
-    </property>
-
-    <property>
-      <name>dfs.web.authentication.kerberos.principal</name>
-      <value>HTTP/_HOST@HAWQ.PIVOTAL.COM</value>
-    </property>
-    <property>
-      <name>dfs.web.authentication.kerberos.keytab</name>
-      <value>/etc/gphd/hawq-krb5.keytab</value>
-    </property>
-    <property>
-        <name>dfs.web.ugi</name>
-        <value>hdfs,hdfs</value>
-        <description>The user account used by the web interface.</description>
-    </property>
-    <property>
-        <name>dfs.permissions</name>
-        <value>true</value>
-    </property>
-    <property>
-        <name>dfs.webhdfs.enabled</name>
-        <value>true</value>
-    </property>
-    <property>
-        <name>dfs.support.append</name>
-        <value>true</value>
-    </property>
-    <property>
-        <name>dfs.client.read.shortcircuit</name>
-        <value>false</value>
-    </property>
-    <property>
-        <name>dfs.block.local-path-access.user</name>
-        <value>gpadmin</value>
-        <description>
-            Specify the user allowed to do short circuit read
-        </description>
-    </property>
-    <property>
-        <name>dfs.safemode.extension</name>
-        <value>0</value>
-    </property>
-    <property>
-        <name>dfs.safemode.min.datanodes</name>
-        <value>1</value>
-    </property>
-    <property>
-        <name>hadoop.tmp.dir</name>
-        <value>%DATA_DIR_ROOT%</value>
-    </property>
-    <property>
-        <name>dfs.namenode.name.dir</name>
-        <value>file:%DATA_DIR_ROOT%/hdfs/name</value>
-    </property>
-    <property>
-        <name>dfs.namenode.checkpoint.dir</name>
-        <value>file:%DATA_DIR_ROOT%/hdfs/namesecondary</value>
-    </property>
-    <property>
-        <name>dfs.namenode.checkpoint.period</name>
-        <value>3600</value>
-    </property>
-    <property>
-        <name>dfs.datanode.data.dir</name>
-        <value>%HDFS_DATANODE_DIR%</value>
-    </property>
-    <property>
-        <name>dfs.replication</name>
-        <value>3</value>
-    </property>
-    <property>
-        <name>dfs.datanode.max.transfer.threads</name>
-        <value>40960</value>
-    </property>
-    <property>
-        <name>dfs.client.socket-timeout</name>
-        <value>300000000</value>
-    </property>
-    <property>
-        <name>dfs.datanode.handler.count</name>
-        <value>60</value>
-    </property>
-    <property>
-        <name>dfs.namenode.handler.count</name>
-        <value>60</value>
-    </property>
-    <property>
-        <name>ipc.client.connection.maxidletime</name>
-        <value>3600000</value>
-    </property>
-    <property>
-        <name>ipc.server.handler.queue.size</name>
-        <value>3300</value>
-    </property>
-    <property>
-        <name>ipc.client.connection</name>
-        <value>3</value>
-    </property>
-    <property>
-        <name>dfs.namenode.accesstime.precision</name>
-        <value>-1</value>
-    </property>
-
-    <property>
-        <name>dfs.nameservices</name>
-        <value>gphd-cluster</value>
-    </property>
-    <property>
-        <name>dfs.ha.namenodes.gphd-cluster</name>
-        <value>nn1,nn2</value>
-    </property>
-    <property>
-        <name>dfs.namenode.rpc-address.gphd-cluster.nn1</name>
-        <value>%NAMENODE%:9000</value>
-    </property>
-    <property>
-        <name>dfs.namenode.rpc-address.gphd-cluster.nn2</name>
-        <value>%SECONDARY_NAMENODE%:9000</value>
-    </property>
-    <property>
-        <name>dfs.namenode.http-address.gphd-cluster.nn1</name>
-        <value>%NAMENODE%:50070</value>
-    </property>
-    <property>
-        <name>dfs.namenode.http-address.gphd-cluster.nn2</name>
-        <value>%SECONDARY_NAMENODE%:50070</value>
-    </property>
-    <property>
-        <name>dfs.namenode.shared.edits.dir</name>
-        <value>qjournal://%ZKSERVER1%:8485;%ZKSERVER2%:8485;%ZKSERVER3%:8485/gphd-cluster</value>
-    </property>
-    <property>
-        <name>dfs.journalnode.edits.dir</name>
-        <value>%JOURNAL_DATA_ROOT%</value> 
-    </property>
-    <property>
-        <name>dfs.client.failover.proxy.provider.gphd-cluster</name>
-        <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
-    </property>
-    <property>
-        <name>dfs.ha.fencing.methods</name>
-        <value>shell(/bin/true)</value>
-    </property>
-    <property>
-        <name>dfs.ha.fencing.ssh.connect-timeout</name>
-        <value>20000</value>
-    </property>
-    <property>
-        <name>dfs.ha.automatic-failover.enabled</name>
-        <value>true</value>
-    </property>
-        <property>
-        <name>dfs.namenode.fs-limits.min-block-size</name>
-        <value>1024</value>
-    </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/10909db4/depends/libyarn/releng/bin/templates/hdfs-site-datanode-secure.xml
----------------------------------------------------------------------
diff --git a/depends/libyarn/releng/bin/templates/hdfs-site-datanode-secure.xml b/depends/libyarn/releng/bin/templates/hdfs-site-datanode-secure.xml
deleted file mode 100755
index 89b33cc..0000000
--- a/depends/libyarn/releng/bin/templates/hdfs-site-datanode-secure.xml
+++ /dev/null
@@ -1,193 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-    <property>
-        <name>dfs.encrypt.data.transfer</name>
-        <value>false</value>
-    </property>
-    <property>
-        <name>dfs.encrypt.data.transfer.algorithm</name>
-        <value>rc4</value>
-        <description>may be "rc4" or "3des" - 3des has a significant performance impact</description>
-    </property>
-    <property>
-        <name>dfs.datanode.keytab.file</name>
-        <value>/etc/gphd/hawq-krb5.keytab</value>
-    </property>
-    <property>
-        <name>dfs.datanode.kerberos.principal</name>
-        <value>hdfs/%DATANODE_PRINCIPAL%@HAWQ.PIVOTAL.COM</value>
-    </property>
-    <property>
-      <name>dfs.datanode.kerberos.http.principal</name>
-      <value>HTTP/%DATANODE_PRINCIPAL%@HAWQ.PIVOTAL.COM</value>
-    </property>
-    <property>
-      <name>dfs.datanode.address</name>
-      <value>0.0.0.0:1004</value>
-    </property>
-    <property>
-      <name>dfs.datanode.http.address</name>
-      <value>0.0.0.0:1006</value>
-    </property>
-    <property>
-        <name>dfs.datanode.data.dir.perm</name>
-        <value>755</value>
-    </property>
-    <property>
-        <name>dfs.namenode.keytab.file</name>
-        <value>/etc/gphd/hawq-krb5.keytab</value>
-    </property>
-    <property>
-        <name>dfs.namenode.kerberos.principal</name>
-        <value>hdfs/%NAMENODE_PRINCIPAL%@HAWQ.PIVOTAL.COM</value>
-    </property>
-    <property>
-      <name>dfs.namenode.kerberos.http.principal</name>
-      <value>HTTP/%NAMENODE_PRINCIPAL%@HAWQ.PIVOTAL.COM</value>
-    </property>
-    <property>
-      <name>dfs.namenode.kerberos.internal.spnego.principal</name>
-      <value>HTTP/%NAMENODE_PRINCIPAL%@HAWQ.PIVOTAL.COM</value>
-    </property>
-
-    <property>
-      <name>dfs.secondary.namenode.keytab.file</name>
-      <value>/etc/gphd/hawq-krb5.keytab</value>
-    </property>
-    <property>
-      <name>dfs.secondary.namenode.kerberos.principal</name>
-      <value>hdfs/%SECONDARYNAMENODE_PRINCIPAL%@HAWQ.PIVOTAL.COM</value>
-    </property>
-    <property>
-      <name>dfs.secondary.namenode.kerberos.http.principal</name>
-      <value>HTTP/%SECONDARYNAMENODE_PRINCIPAL%@HAWQ.PIVOTAL.COM</value>
-    </property>
-    <property>
-      <name>dfs.secondary.namenode.kerberos.internal.spnego.principal</name>
-      <value>HTTP/%SECONDARYNAMENODE_PRINCIPAL%@HAWQ.PIVOTAL.COM</value>
-    </property>
-
-    <property>
-      <name>dfs.block.access.token.enable</name>
-      <value>true</value>
-    </property>
-
-    <property>
-      <name>dfs.web.authentication.kerberos.principal</name>
-      <value>HTTP/%WEBAUTH_HOST%@HAWQ.PIVOTAL.COM</value>
-    </property>
-    <property>
-      <name>dfs.web.authentication.kerberos.keytab</name>
-      <value>/etc/gphd/hawq-krb5.keytab</value>
-    </property>
-
-    <property>
-        <name>dfs.web.ugi</name>
-        <value>hdfs,hdfs</value>
-        <description>The user account used by the web interface.</description>
-    </property>
-    <property>
-        <name>dfs.permissions</name>
-        <value>true</value>
-    </property>
-    <property>
-        <name>dfs.webhdfs.enabled</name>
-        <value>true</value>
-    </property>
-    <property>
-        <name>dfs.support.append</name>
-        <value>true</value>
-    </property>
-    <property>
-        <name>dfs.client.read.shortcircuit</name>
-        <value>false</value>
-    </property>
-    <property>
-        <name>dfs.block.local-path-access.user</name>
-        <value>gpadmin</value>
-        <description>
-            Specify the user allowed to do short circuit read
-        </description>
-    </property>
-    <property>
-        <name>dfs.safemode.extension</name>
-        <value>0</value>
-    </property>
-    <property>
-        <name>dfs.safemode.min.datanodes</name>
-        <value>1</value>
-    </property>
-    <property>
-        <name>hadoop.tmp.dir</name>
-        <value>%DATA_DIR_ROOT%</value>
-    </property>
-    <property>
-        <name>dfs.namenode.name.dir</name>
-        <value>file:%DATA_DIR_ROOT%/hdfs/name</value>
-    </property>
-    <property>
-        <name>dfs.namenode.checkpoint.dir</name>
-        <value>file:%DATA_DIR_ROOT%/hdfs/namesecondary</value>
-    </property>
-    <property>
-        <name>dfs.namenode.checkpoint.period</name>
-        <value>3600</value>
-    </property>
-    <property>
-        <name>dfs.datanode.data.dir</name>
-        <value>%HDFS_DATANODE_DIR%</value>
-    </property>
-    <property>
-        <name>dfs.replication</name>
-        <value>3</value>
-    </property>
-    <property>
-        <name>dfs.datanode.max.transfer.threads</name>
-        <value>40960</value>
-    </property>
-    <property>
-        <name>dfs.client.socket-timeout</name>
-        <value>300000000</value>
-    </property>
-    <property>
-        <name>dfs.datanode.handler.count</name>
-        <value>60</value>
-    </property>
-    <property>
-        <name>dfs.namenode.handler.count</name>
-        <value>60</value>
-    </property>
-    <property>
-        <name>ipc.client.connection.maxidletime</name>
-        <value>3600000</value>
-    </property>
-    <property>
-        <name>ipc.server.handler.queue.size</name>
-        <value>3300</value>
-    </property>
-    <property>
-        <name>ipc.client.connection</name>
-        <value>3</value>
-    </property>
-    <property>
-        <name>dfs.namenode.accesstime.precision</name>
-        <value>-1</value>
-    </property>
-</configuration>

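Both secure hdfs-site templates point every HDFS role at the same keytab, /etc/gphd/hawq-krb5.keytab, with hdfs/... and HTTP/... principals in the HAWQ.PIVOTAL.COM realm (resolved per host via _HOST in the HA variant, via %...PRINCIPAL% placeholders in the non-HA one). A hedged sanity check along those lines; using `hostname -f` to stand in for _HOST is an assumption for illustration, not part of the removed tooling:

    # Keytab path and realm taken from the removed templates; hostname expansion assumed.
    kinit -kt /etc/gphd/hawq-krb5.keytab hdfs/$(hostname -f)@HAWQ.PIVOTAL.COM
    klist    # confirm a ticket was obtained for the hdfs service principal
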
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/10909db4/depends/libyarn/releng/bin/templates/hdfs-site-ha.xml
----------------------------------------------------------------------
diff --git a/depends/libyarn/releng/bin/templates/hdfs-site-ha.xml b/depends/libyarn/releng/bin/templates/hdfs-site-ha.xml
deleted file mode 100644
index 7fb7c1d..0000000
--- a/depends/libyarn/releng/bin/templates/hdfs-site-ha.xml
+++ /dev/null
@@ -1,172 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-    <property>
-        <name>dfs.web.ugi</name>
-        <value>hdfs,hdfs</value>
-        <description>The user account used by the web interface.</description>
-    </property>
-    <property>
-        <name>dfs.permissions</name>
-        <value>true</value>
-    </property>
-    <property>
-        <name>dfs.webhdfs.enabled</name>
-        <value>true</value>
-    </property>
-    <property>
-        <name>dfs.support.append</name>
-        <value>true</value>
-    </property>
-    <property>
-        <name>dfs.client.read.shortcircuit</name>
-        <value>false</value>
-    </property>
-    <property>
-        <name>dfs.block.local-path-access.user</name>
-        <value>gpadmin</value>
-        <description>
-            Specify the user allowed to do short circuit read
-        </description>
-    </property>
-    <property>
-        <name>dfs.safemode.extension</name>
-        <value>0</value>
-    </property>
-    <property>
-        <name>dfs.safemode.min.datanodes</name>
-        <value>1</value>
-    </property>
-    <property>
-        <name>hadoop.tmp.dir</name>
-        <value>%DATA_DIR_ROOT%</value>
-    </property>
-    <property>
-        <name>dfs.namenode.name.dir</name>
-        <value>file:%DATA_DIR_ROOT%/hdfs/name</value>
-    </property>
-    <property>
-        <name>dfs.namenode.checkpoint.dir</name>
-        <value>file:%DATA_DIR_ROOT%/hdfs/namesecondary</value>
-    </property>
-    <property>
-        <name>dfs.namenode.checkpoint.period</name>
-        <value>3600</value>
-    </property>
-    <property>
-        <name>dfs.datanode.data.dir</name>
-        <value>%HDFS_DATANODE_DIR%</value>
-    </property>
-    <property>
-        <name>dfs.datanode.data.dir.perm</name>
-        <value>755</value>
-    </property>
-    <property>
-        <name>dfs.replication</name>
-        <value>3</value>
-    </property>
-    <property>
-        <name>dfs.datanode.max.transfer.threads</name>
-        <value>40960</value>
-    </property>
-    <property>
-        <name>dfs.client.socket-timeout</name>
-        <value>300000000</value>
-    </property>
-    <property>
-        <name>dfs.datanode.handler.count</name>
-        <value>60</value>
-    </property>
-    <property>
-        <name>dfs.namenode.handler.count</name>
-        <value>60</value>
-    </property>
-    <property>
-        <name>ipc.client.connection.maxidletime</name>
-        <value>3600000</value>
-    </property>
-    <property>
-        <name>ipc.server.handler.queue.size</name>
-        <value>3300</value>
-    </property>
-    <property>
-        <name>ipc.client.connection</name>
-        <value>3</value>
-    </property>
-    <property>
-        <name>dfs.namenode.accesstime.precision</name>
-        <value>-1</value>
-    </property>
-    <property>
-        <name>dfs.datanode.http.address</name>
-        <value>0.0.0.0:59075</value>
-        <description>
-            The datanode http server address and port. If the port is 0 then the server will start on a free port.
-        </description>
-    </property>
-
-    <property>
-      <name>dfs.nameservices</name>
-      <value>gphd-cluster</value>
-    </property>
-    <property>
-      <name>dfs.ha.namenodes.gphd-cluster</name>
-      <value>nn1,nn2</value>
-    </property>
-    <property>
-      <name>dfs.namenode.rpc-address.gphd-cluster.nn1</name>
-      <value>%NAMENODE%:9000</value>
-    </property>
-    <property>
-      <name>dfs.namenode.rpc-address.gphd-cluster.nn2</name>
-      <value>%SECONDARY_NAMENODE%:9000</value>
-    </property>
-    <property>
-      <name>dfs.namenode.http-address.gphd-cluster.nn1</name>
-      <value>%NAMENODE%:50070</value>
-    </property>
-    <property>
-      <name>dfs.namenode.http-address.gphd-cluster.nn2</name>
-      <value>%SECONDARY_NAMENODE%:50070</value>
-    </property>
-    <property>
-      <name>dfs.namenode.shared.edits.dir</name>
-      <value>qjournal://%JOURNALNODE1%:8485;%JOURNALNODE2%:8485;%JOURNALNODE3%:8485/gphd-cluster</value>
-    </property>
-    <property>
-      <name>dfs.journalnode.edits.dir</name>
-      <value>%JOURNAL_DATA_ROOT%</value>
-    </property>
-    <property>
-      <name>dfs.client.failover.proxy.provider.gphd-cluster</name>
-      <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
-    </property>
-    <property>
-      <name>dfs.ha.fencing.methods</name>
-      <value>shell(/bin/true)</value>
-    </property>
-    <property>
-      <name>dfs.ha.fencing.ssh.connect-timeout</name>
-      <value>20000</value>
-    </property>
-    <property>
-      <name>dfs.ha.automatic-failover.enabled</name>
-      <value>true</value>
-    </property>
-</configuration>
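The HA template above defines a logical nameservice, gphd-cluster, backed by nn1/nn2 with quorum-journal shared edits and ConfiguredFailoverProxyProvider for client-side failover. With such a config in place, clients would address the nameservice rather than either NameNode host; a usage sketch with illustrative paths:

    # List the filesystem root through the logical nameservice.
    hdfs dfs -ls hdfs://gphd-cluster/
    # Ask each configured NameNode for its current HA state (active/standby).
    hdfs haadmin -getServiceState nn1
    hdfs haadmin -getServiceState nn2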