Posted to commits@hawq.apache.org by es...@apache.org on 2017/02/03 09:00:03 UTC

[01/50] [abbrv] incubator-hawq git commit: HAWQ-1243. Add suffix name for ranger restful service. [Forced Update!]

Repository: incubator-hawq
Updated Branches:
  refs/heads/2.1.0.0-incubating 2d0993dfd -> 12c7df017 (forced update)


HAWQ-1243. Add suffix name for ranger restful service.


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/eed9a0f2
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/eed9a0f2
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/eed9a0f2

Branch: refs/heads/2.1.0.0-incubating
Commit: eed9a0f2535e7c7b39468252ee7ae184da009884
Parents: 3e3c41a
Author: hzhang2 <zh...@163.com>
Authored: Wed Dec 28 13:36:45 2016 +0800
Committer: hzhang2 <zh...@163.com>
Committed: Wed Dec 28 13:36:45 2016 +0800

----------------------------------------------------------------------
 src/backend/libpq/rangerrest.c | 13 ++-----------
 src/backend/utils/misc/guc.c   | 12 +++++++++++-
 src/include/utils/guc.h        |  1 +
 3 files changed, 14 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
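
For reference, a minimal standalone sketch of the URL assembly shown in the rangerrest.c diff below, with plain snprintf() standing in for HAWQ's StringInfo; the port 8080 and suffix "hawq" are the defaults this commit adds in guc.c, while "localhost" is an assumed host value:

#include <stdio.h>

/* Stand-ins for the GUCs used in call_ranger_rest(); 8080 and "hawq" are the
 * defaults added by this commit, "localhost" is an assumed host value. */
static const char *rps_addr_host   = "localhost";
static int         rps_addr_port   = 8080;
static const char *rps_addr_suffix = "hawq";

int main(void)
{
    char url[1024];

    /* Mirrors the appendStringInfo() sequence: the hard-coded "/rps" path
     * becomes "/" followed by the configurable suffix. */
    snprintf(url, sizeof(url), "http://%s:%d/%s",
             rps_addr_host, rps_addr_port, rps_addr_suffix);

    printf("%s\n", url);        /* http://localhost:8080/hawq */
    return 0;
}
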


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/eed9a0f2/src/backend/libpq/rangerrest.c
----------------------------------------------------------------------
diff --git a/src/backend/libpq/rangerrest.c b/src/backend/libpq/rangerrest.c
index e50c3e1..59b33a8 100644
--- a/src/backend/libpq/rangerrest.c
+++ b/src/backend/libpq/rangerrest.c
@@ -426,25 +426,16 @@ int call_ranger_rest(CURL_HANDLE curl_handle, const char* request)
 	curl_easy_setopt(curl_handle->curl_handle, CURLOPT_TIMEOUT, 30L);
 
 	/* specify URL to get */
-	//curl_easy_setopt(curl_handle->curl_handle, CURLOPT_URL, "http://localhost:8089/checkprivilege");
 	StringInfoData tname;
 	initStringInfo(&tname);
 	appendStringInfo(&tname, "http://");
 	appendStringInfo(&tname, "%s", rps_addr_host);
 	appendStringInfo(&tname, ":");
 	appendStringInfo(&tname, "%d", rps_addr_port);
-	appendStringInfo(&tname, "/rps");
+	appendStringInfo(&tname, "/");
+	appendStringInfo(&tname, "%s", rps_addr_suffix);
 	curl_easy_setopt(curl_handle->curl_handle, CURLOPT_URL, tname.data);
 
-	/* specify format */
-	// struct curl_slist *plist = curl_slist_append(NULL, "Content-Type:application/json;charset=UTF-8");
-	// curl_easy_setopt(curl_handle, CURLOPT_HTTPHEADER, plist);
-
-
-	//curl_easy_setopt(curl_handle->curl_handle, CURLOPT_POSTFIELDSIZE_LARGE, 1000);
-	//curl_easy_setopt(curl_handle->curl_handle, CURLOPT_HTTPGET, 0);
-	//curl_easy_setopt(curl_handle->curl_handle, CURLOPT_CUSTOMREQUEST, "POST");
-
 	struct curl_slist *headers = NULL;
 	//curl_slist_append(headers, "Accept: application/json");
 	headers = curl_slist_append(headers, "Content-Type:application/json");

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/eed9a0f2/src/backend/utils/misc/guc.c
----------------------------------------------------------------------
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 3d36a72..00b9bad 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -780,6 +780,7 @@ bool gp_plpgsql_clear_cache_always = false;
 bool gp_called_by_pgdump = false;
 
 char   *rps_addr_host;
+char   *rps_addr_suffix;
 int     rps_addr_port;
 
 /*
@@ -6268,7 +6269,7 @@ static struct config_int ConfigureNamesInt[] =
       NULL
     },
     &rps_addr_port,
-    1, 1, 65535, NULL, NULL
+    8080, 1, 65535, NULL, NULL
   },
 
 	{
@@ -8184,6 +8185,15 @@ static struct config_string ConfigureNamesString[] =
     "localhost", NULL, NULL
   },
 
+  {
+    {"hawq_rps_address_suffix", PGC_POSTMASTER, PRESET_OPTIONS,
+      gettext_noop("ranger plugin server suffix of restful service address"),
+      NULL
+    },
+    &rps_addr_suffix,
+    "hawq", NULL, NULL
+  },
+
 	{
 		{"standby_address_host", PGC_POSTMASTER, PRESET_OPTIONS,
 			gettext_noop("standby server address hostname"),

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/eed9a0f2/src/include/utils/guc.h
----------------------------------------------------------------------
diff --git a/src/include/utils/guc.h b/src/include/utils/guc.h
index 95e14a4..cb45a7c 100644
--- a/src/include/utils/guc.h
+++ b/src/include/utils/guc.h
@@ -454,6 +454,7 @@ extern bool	optimizer_partition_selection_log;
  * rps host and port
  */
 extern char   *rps_addr_host;
+extern char   *rps_addr_suffix;
 extern int     rps_addr_port;
 /*
  * During insertion in a table with parquet partitions,


[05/50] [abbrv] incubator-hawq git commit: HAWQ-1242. hawq-site.xml default content has wrong guc variable names

Posted by es...@apache.org.
HAWQ-1242. hawq-site.xml default content has wrong guc variable names


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/61646cd5
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/61646cd5
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/61646cd5

Branch: refs/heads/2.1.0.0-incubating
Commit: 61646cd55fddb4ef3feafe9fd125763796d518c0
Parents: e25fe8b
Author: Yi <yj...@pivotal.io>
Authored: Fri Dec 30 13:15:07 2016 +1100
Committer: Yi <yj...@pivotal.io>
Committed: Fri Dec 30 13:15:07 2016 +1100

----------------------------------------------------------------------
 src/backend/utils/misc/etc/hawq-site.xml          | 8 ++++----
 src/backend/utils/misc/etc/template-hawq-site.xml | 8 ++++----
 2 files changed, 8 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/61646cd5/src/backend/utils/misc/etc/hawq-site.xml
----------------------------------------------------------------------
diff --git a/src/backend/utils/misc/etc/hawq-site.xml b/src/backend/utils/misc/etc/hawq-site.xml
index b327ab4..012ccbf 100644
--- a/src/backend/utils/misc/etc/hawq-site.xml
+++ b/src/backend/utils/misc/etc/hawq-site.xml
@@ -33,7 +33,7 @@ under the License.
 	</property>
 
 	<property>
-		<name>hawq_standby_address_host</name>
+		<name>standby_address_host</name>
 		<value>none</value>
 		<description>The host name of hawq standby master.</description>
 	</property>
@@ -45,19 +45,19 @@ under the License.
 	</property>
 
 	<property>
-		<name>hawq_dfs_url</name>
+		<name>dfs_url</name>
 		<value>localhost:8020/hawq_default</value>
 		<description>URL for accessing HDFS.</description>
 	</property>
 
 	<property>
-		<name>hawq_master_directory</name>
+		<name>master_directory</name>
 		<value>~/hawq-data-directory/masterdd</value>
 		<description>The directory of hawq master.</description>
 	</property>
 
 	<property>
-		<name>hawq_segment_directory</name>
+		<name>segment_directory</name>
 		<value>~/hawq-data-directory/segmentdd</value>
 		<description>The directory of hawq segment.</description>
 	</property> 

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/61646cd5/src/backend/utils/misc/etc/template-hawq-site.xml
----------------------------------------------------------------------
diff --git a/src/backend/utils/misc/etc/template-hawq-site.xml b/src/backend/utils/misc/etc/template-hawq-site.xml
index cf6273a..1901db3 100644
--- a/src/backend/utils/misc/etc/template-hawq-site.xml
+++ b/src/backend/utils/misc/etc/template-hawq-site.xml
@@ -33,7 +33,7 @@ under the License.
 	</property>
 
 	<property>
-		<name>hawq_standby_address_host</name>
+		<name>standby_address_host</name>
 		<value>%standby.host%</value>
 		<description>The host name of hawq standby master.</description>
 	</property>
@@ -45,19 +45,19 @@ under the License.
 	</property>
 
 	<property>
-		<name>hawq_dfs_url</name>
+		<name>dfs_url</name>
 		<value>%namenode.host%:%namenode.port%/%hawq.file.space%</value>
 		<description>URL for accessing HDFS.</description>
 	</property>
 
 	<property>
-		<name>hawq_master_directory</name>
+		<name>master_directory</name>
 		<value>%master.directory%</value>
 		<description>The directory of hawq master.</description>
 	</property>
 
 	<property>
-		<name>hawq_segment_directory</name>
+		<name>segment_directory</name>
 		<value>%segment.directory%</value>
 		<description>The directory of hawq segment.</description>
 	</property> 


[15/50] [abbrv] incubator-hawq git commit: HAWQ-1267. Update NOTICE file to reflect the right year for copyright

Posted by es...@apache.org.
HAWQ-1267. Update NOTICE file to reflect the right year for copyright


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/a8177153
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/a8177153
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/a8177153

Branch: refs/heads/2.1.0.0-incubating
Commit: a8177153b25123759d15411146c094dc350e4bab
Parents: bf4742c
Author: Ruilong Huo <rh...@pivotal.io>
Authored: Fri Jan 13 10:00:41 2017 +0800
Committer: Ruilong Huo <rh...@pivotal.io>
Committed: Fri Jan 13 10:00:41 2017 +0800

----------------------------------------------------------------------
 NOTICE | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/a8177153/NOTICE
----------------------------------------------------------------------
diff --git a/NOTICE b/NOTICE
index 6589c8f..1b1b2e7 100644
--- a/NOTICE
+++ b/NOTICE
@@ -1,5 +1,5 @@
 Apache HAWQ (incubating) 
-Copyright 2016 The Apache Software Foundation.
+Copyright 2017 The Apache Software Foundation.
 
 This product includes software developed at
 The Apache Software Foundation (http://www.apache.org/).


[12/50] [abbrv] incubator-hawq git commit: HAWQ-1256. A non-superuser connecting to a database does an aclcheck through Ranger before the curl handle has been initialized.

Posted by es...@apache.org.
HAWQ-1256. A non-superuser connecting to a database does an aclcheck through Ranger before the curl handle has been initialized.


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/aa5792d8
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/aa5792d8
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/aa5792d8

Branch: refs/heads/2.1.0.0-incubating
Commit: aa5792d858f545815361c8247cdbb22eec9dd3f1
Parents: 2a7c20f
Author: stanlyxiang <st...@gmail.com>
Authored: Wed Jan 11 18:10:11 2017 +0800
Committer: stanlyxiang <st...@gmail.com>
Committed: Wed Jan 11 18:16:54 2017 +0800

----------------------------------------------------------------------
 src/backend/libpq/rangerrest.c |  1 +
 src/backend/tcop/postgres.c    | 38 ++++++++++++++++++-------------------
 2 files changed, 19 insertions(+), 20 deletions(-)
----------------------------------------------------------------------
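
A minimal sketch of the init-once pattern the diff below moves to an earlier point in PostgresMain(), so the curl context exists before the first aclcheck; the struct here is a simplified stand-in for HAWQ's curl_context_t, and only the libcurl calls match the real code:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <curl/curl.h>

typedef struct {
    CURL  *curl_handle;
    bool   hasInited;
    char  *buffer;
    size_t buffer_size;
} ranger_curl_context;            /* simplified stand-in for curl_context_t */

static ranger_curl_context ctx;

static bool init_ranger_curl_context(void)
{
    if (ctx.hasInited)
        return true;              /* already set up, nothing to do */

    curl_global_init(CURL_GLOBAL_ALL);
    ctx.curl_handle = curl_easy_init();
    if (ctx.curl_handle == NULL)
    {
        curl_global_cleanup();    /* only global state to undo at this point */
        return false;             /* caller decides how loudly to complain */
    }

    ctx.buffer_size = 1024;       /* stand-in for CURL_RES_BUFFER_SIZE */
    ctx.buffer = calloc(1, ctx.buffer_size);
    ctx.hasInited = true;
    return true;
}

int main(void)
{
    if (!init_ranger_curl_context())
        fprintf(stderr, "could not initialize ranger curl context\n");

    /* ... issue REST requests here ... */

    if (ctx.hasInited)
    {
        curl_easy_cleanup(ctx.curl_handle);
        curl_global_cleanup();
        free(ctx.buffer);
    }
    return 0;
}
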


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/aa5792d8/src/backend/libpq/rangerrest.c
----------------------------------------------------------------------
diff --git a/src/backend/libpq/rangerrest.c b/src/backend/libpq/rangerrest.c
index fd8937a..5406251 100644
--- a/src/backend/libpq/rangerrest.c
+++ b/src/backend/libpq/rangerrest.c
@@ -384,6 +384,7 @@ int check_privilege_from_ranger(List *arg_list)
 	Assert(request != NULL);
 
 	/* call GET method to send request*/
+	Assert(curl_context_ranger.hasInited);
 	if (call_ranger_rest(&curl_context_ranger, request) < 0)
 	{
 		return RANGERCHECK_NO_PRIV;

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/aa5792d8/src/backend/tcop/postgres.c
----------------------------------------------------------------------
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index c8d7e33..e1bfb1d 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -4391,7 +4391,24 @@ PostgresMain(int argc, char *argv[], const char *username)
 		BuildFlatFiles(true);
 	}
 
-
+	/* for enable ranger*/
+	if (enable_ranger && !curl_context_ranger.hasInited)
+	{
+		memset(&curl_context_ranger, 0, sizeof(curl_context_t));
+		curl_global_init(CURL_GLOBAL_ALL);
+		/* init the curl session */
+		curl_context_ranger.curl_handle = curl_easy_init();
+		if (curl_context_ranger.curl_handle == NULL) {
+			/* cleanup curl stuff */
+			/* no need to cleanup curl_handle since it's null. just cleanup curl global.*/
+			curl_global_cleanup();
+		}
+		curl_context_ranger.hasInited = true;
+		curl_context_ranger.response.buffer = palloc0(CURL_RES_BUFFER_SIZE);
+		curl_context_ranger.response.buffer_size = CURL_RES_BUFFER_SIZE;
+		elog(DEBUG3, "when enable ranger, init global struct for privileges check.");
+		on_proc_exit(curl_finalize, 0);
+	}
 	/*
 	 * Create a per-backend PGPROC struct in shared memory, except in the
 	 * EXEC_BACKEND case where this was done in SubPostmasterMain. We must do
@@ -4630,25 +4647,6 @@ PostgresMain(int argc, char *argv[], const char *username)
 	if (!ignore_till_sync)
 		send_ready_for_query = true;	/* initially, or after error */
 
-	/* for enable ranger*/
-	if (AmIMaster() && enable_ranger && !curl_context_ranger.hasInited)
-	{
-		memset(&curl_context_ranger, 0, sizeof(curl_context_t));
-		curl_global_init(CURL_GLOBAL_ALL);
-		/* init the curl session */
-		curl_context_ranger.curl_handle = curl_easy_init();
-		if (curl_context_ranger.curl_handle == NULL) {
-			/* cleanup curl stuff */
-			/* no need to cleanup curl_handle since it's null. just cleanup curl global.*/
-			curl_global_cleanup();
-			elog(ERROR, "initialize global curl context failed.");
-		}
-		curl_context_ranger.hasInited = true;
-		curl_context_ranger.response.buffer = palloc0(CURL_RES_BUFFER_SIZE);
-		curl_context_ranger.response.buffer_size = CURL_RES_BUFFER_SIZE;
-		elog(DEBUG3, "initialize global curl context for privileges check.");
-		on_proc_exit(curl_finalize, 0);
-	}
 	/*
 	 * Non-error queries loop here.
 	 */


[27/50] [abbrv] incubator-hawq git commit: HAWQ-1277. Fix build problem on CentOS 7, with --with-plperl.

Posted by es...@apache.org.
HAWQ-1277. Fix build problem on CentOS 7, with --with-plperl.

See earlier discussion on this on pgsql-hackers, message-id
9B946296-A2EB-4B45-A190-153F06662F8B@kineticode.com. We'd get this fix
eventually as we merge with later PostgreSQL versions, but let's make life
easier for people hitting this issue now.

This is a backpatch of the following upstream commit, sans the MSVC build
parts, as I have no environment to test that and we don't support Windows
anyway. We'll get the MSVC parts later, as we merge.

commit ba00ab0b111a0cbbac612e8ea8b0d5f96534102e
Author: Andrew Dunstan <an...@dunslane.net>
Date:   Sat Nov 26 15:22:32 2011 -0500

    Use the preferred version of xsubpp, not necessarily the one that came with the
    distro version of perl.

    David Wheeler and Alex Hunsaker.

    Backpatch to 9.1 where it applies cleanly. A simple workaround is available for earlier
    branches, and further effort doesn't seem warranted.


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/afac2dfe
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/afac2dfe
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/afac2dfe

Branch: refs/heads/2.1.0.0-incubating
Commit: afac2dfe6d1be9434b93b5065c00b743c8243098
Parents: 368dbc9
Author: Ed Espino <ee...@pivotal.io>
Authored: Mon Jan 16 18:55:18 2017 -0800
Committer: Ed Espino <ee...@pivotal.io>
Committed: Mon Jan 16 21:38:02 2017 -0800

----------------------------------------------------------------------
 src/pl/plperl/GNUmakefile | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/afac2dfe/src/pl/plperl/GNUmakefile
----------------------------------------------------------------------
diff --git a/src/pl/plperl/GNUmakefile b/src/pl/plperl/GNUmakefile
index d480268..a1e0a05 100644
--- a/src/pl/plperl/GNUmakefile
+++ b/src/pl/plperl/GNUmakefile
@@ -56,6 +56,9 @@ endif
 # where to find psql for running the tests
 PSQLDIR = $(bindir)
 
+# where to find xsubpp for building XS.
+XSUBPPDIR = $(shell $(PERL) -e 'use List::Util qw(first); print first { -r "$$_/ExtUtils/xsubpp" } @INC')
+
 include $(top_srcdir)/src/Makefile.shlib
 
 plperl.o: perlchunks.h plperl_opmask.h
@@ -69,10 +72,10 @@ perlchunks.h: $(PERLCHUNKS)
 all: all-lib
 
 SPI.c: SPI.xs
-	$(PERL) $(perl_privlibexp)/ExtUtils/xsubpp -typemap $(perl_privlibexp)/ExtUtils/typemap $< >$@
-	
+	$(PERL) $(XSUBPPDIR)/ExtUtils/xsubpp -typemap $(perl_privlibexp)/ExtUtils/typemap $< >$@
+
 Util.c: Util.xs
-	$(PERL) $(perl_privlibexp)/ExtUtils/xsubpp -typemap $(perl_privlibexp)/ExtUtils/typemap $< >$@
+	$(PERL) $(XSUBPPDIR)/ExtUtils/xsubpp -typemap $(perl_privlibexp)/ExtUtils/typemap $< >$@
 
 install: all installdirs install-lib
 


[24/50] [abbrv] incubator-hawq git commit: HAWQ-1275. Check built-in catalogs, tables and functions in native aclcheck.

Posted by es...@apache.org.
HAWQ-1275. Check built-in catalogs, tables and functions in native aclcheck.


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/cf54c418
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/cf54c418
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/cf54c418

Branch: refs/heads/2.1.0.0-incubating
Commit: cf54c41809627f5b9b38eb6322947ef12439b0af
Parents: 0bc2c8c
Author: hubertzhang <hu...@apache.org>
Authored: Mon Jan 16 16:01:39 2017 +0800
Committer: hubertzhang <hu...@apache.org>
Committed: Mon Jan 16 16:01:39 2017 +0800

----------------------------------------------------------------------
 src/backend/catalog/aclchk.c        | 43 ++++++++++++++++++++++++++------
 src/backend/utils/cache/lsyscache.c | 24 ++++++++++++++++++
 src/backend/utils/misc/guc.c        | 13 +++++++++-
 src/include/catalog/pg_namespace.h  |  1 -
 src/include/utils/guc.h             |  3 +++
 src/include/utils/lsyscache.h       |  1 +
 6 files changed, 76 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
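
A hedged, standalone sketch of the policy the aclchk.c change below implements: objects in built-in schemas stay on the native ACL check, everything else is handed to Ranger. Schema names stand in for the namespace OIDs used in the real code, and the heap-table case from the diff is left out of this sketch:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Schema names corresponding to the namespaces tested in fallBackToNativeCheck(). */
static const char *builtin_schemas[] = {
    "pg_catalog", "information_schema", "pg_toast", "pg_aoseg", "pg_bitmapindex"
};

static bool fall_back_to_native_check(const char *schema)
{
    for (size_t i = 0; i < sizeof(builtin_schemas) / sizeof(builtin_schemas[0]); i++)
    {
        if (strcmp(schema, builtin_schemas[i]) == 0)
            return true;          /* built-in schema: keep the native aclcheck */
    }
    return false;                 /* user object: hand the check to Ranger */
}

int main(void)
{
    printf("pg_catalog -> %s\n", fall_back_to_native_check("pg_catalog") ? "native" : "ranger");
    printf("public     -> %s\n", fall_back_to_native_check("public")     ? "native" : "ranger");
    return 0;
}
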


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/cf54c418/src/backend/catalog/aclchk.c
----------------------------------------------------------------------
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index 73de11b..200d9cb 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -2669,29 +2669,58 @@ List *getActionName(AclMode mask)
 
 bool fallBackToNativeCheck(AclObjectKind objkind, Oid obj_oid, Oid roleid)
 {
-  //for heap table, we fall back to native check.
-  if(objkind == ACL_KIND_CLASS)
+  /* get the latest information_schema_namespcace_oid. Since caql access heap table
+   * directly without aclcheck, this function will not be called recursively
+   */
+  if (information_schema_namespcace_oid == 0)
+  {
+	  information_schema_namespcace_oid = (int)get_namespace_oid("information_schema");
+  }
+  /*for heap table, we fall back to native check.*/
+  if (objkind == ACL_KIND_CLASS)
   {
     char relstorage = get_rel_relstorage(obj_oid);
-    if(relstorage == 'h')
+    if (relstorage == 'h')
+    {
+      return true;
+    }
+  }
+  else if (objkind == ACL_KIND_NAMESPACE)
+  {
+	/*native check build-in schemas.*/
+    if (obj_oid == PG_CATALOG_NAMESPACE || obj_oid == information_schema_namespcace_oid
+    		|| obj_oid == PG_AOSEGMENT_NAMESPACE || obj_oid == PG_TOAST_NAMESPACE
+			|| obj_oid == PG_BITMAPINDEX_NAMESPACE)
     {
       return true;
     }
   }
+  else if (objkind == ACL_KIND_PROC)
+  {
+	/*native check functions under build-in schemas.*/
+    Oid namespaceid = get_func_namespace(obj_oid);
+    if (namespaceid == PG_CATALOG_NAMESPACE || namespaceid == information_schema_namespcace_oid
+			|| namespaceid == PG_AOSEGMENT_NAMESPACE || namespaceid == PG_TOAST_NAMESPACE
+			|| namespaceid == PG_BITMAPINDEX_NAMESPACE)
+    {
+      return true;
+    }
+  }
+
   return false;
 }
 
 bool fallBackToNativeChecks(AclObjectKind objkind, List* table_list, Oid roleid)
 {
-  //for heap table, we fall back to native check.
-  if(objkind == ACL_KIND_CLASS)
+  /*we only have range table here*/
+  if (objkind == ACL_KIND_CLASS)
   {
     ListCell   *l;
     foreach(l, table_list)
     {
       RangeTblEntry *rte=(RangeTblEntry *) lfirst(l);
-      char relstorage = get_rel_relstorage(rte->relid);
-      if(relstorage == 'h')
+      bool ret = fallBackToNativeCheck(ACL_KIND_CLASS, rte->relid, roleid);
+      if(ret)
       {
         return true;
       }

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/cf54c418/src/backend/utils/cache/lsyscache.c
----------------------------------------------------------------------
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
index fa8fde5..3ccf847 100644
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -3248,6 +3248,30 @@ get_namespace_name(Oid nspid)
 	return result;
 }
 
+/*
+ * get_namespace_oid
+ *		Returns the oid of a namespace given its name
+ *
+ */
+Oid
+get_namespace_oid(const char* npname)
+{
+	Oid			result;
+	int			fetchCount;
+
+	result = caql_getoid_plus(
+			NULL,
+			&fetchCount,
+			NULL,
+			cql("SELECT oid FROM pg_namespace "
+				" WHERE nspname = :1 ",
+				PointerGetDatum((char *) npname)));
+
+	if (!fetchCount)
+		return InvalidOid;
+
+	return result;
+}
 /*				---------- PG_AUTHID CACHE ----------					 */
 
 /*

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/cf54c418/src/backend/utils/misc/guc.c
----------------------------------------------------------------------
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index fbf19cf..21d705a 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -769,6 +769,8 @@ bool		optimizer_prefer_scalar_dqa_multistage_agg;
 bool		optimizer_parallel_union;
 bool		optimizer_array_constraints;
 
+int information_schema_namespcace_oid;
+
 /* Security */
 bool		gp_reject_internal_tcp_conn = true;
 
@@ -6195,6 +6197,15 @@ static struct config_int ConfigureNamesInt[] =
 	},
 
 	{
+		{"information_schema_namespcace_oid", PGC_USERSET, DEVELOPER_OPTIONS,
+			gettext_noop("the oid of information_schema namespace"),
+			NULL
+		},
+		&information_schema_namespcace_oid,
+		0, 0, INT_MAX, NULL, NULL
+	},
+
+	{
 		{"memory_profiler_dataset_size", PGC_USERSET, DEVELOPER_OPTIONS,
 			gettext_noop("Set the size in GB"),
 			NULL,
@@ -6269,7 +6280,7 @@ static struct config_int ConfigureNamesInt[] =
       NULL
     },
     &rps_addr_port,
-    8080, 1, 65535, NULL, NULL
+    8432, 1, 65535, NULL, NULL
   },
 
 	{

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/cf54c418/src/include/catalog/pg_namespace.h
----------------------------------------------------------------------
diff --git a/src/include/catalog/pg_namespace.h b/src/include/catalog/pg_namespace.h
index a91e2ea..1bedc70 100644
--- a/src/include/catalog/pg_namespace.h
+++ b/src/include/catalog/pg_namespace.h
@@ -124,7 +124,6 @@ DATA(insert OID = 6104 ( "pg_aoseg" PGUID _null_ 0));
 DESCR("Reserved schema for Append Only segment list and eof tables");
 #define PG_AOSEGMENT_NAMESPACE 6104
 
-
 #define IsBuiltInNameSpace(namespaceId) \
 	(namespaceId == PG_CATALOG_NAMESPACE || \
 	 namespaceId == PG_TOAST_NAMESPACE || \

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/cf54c418/src/include/utils/guc.h
----------------------------------------------------------------------
diff --git a/src/include/utils/guc.h b/src/include/utils/guc.h
index cb45a7c..2315778 100644
--- a/src/include/utils/guc.h
+++ b/src/include/utils/guc.h
@@ -445,6 +445,9 @@ extern bool optimizer_prefer_scalar_dqa_multistage_agg;
 extern bool optimizer_parallel_union;
 extern bool optimizer_array_constraints;
 
+
+extern int information_schema_namespcace_oid;
+
 /**
  * Enable logging of DPE match in optimizer.
  */

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/cf54c418/src/include/utils/lsyscache.h
----------------------------------------------------------------------
diff --git a/src/include/utils/lsyscache.h b/src/include/utils/lsyscache.h
index 21a38cf..6ee99be 100644
--- a/src/include/utils/lsyscache.h
+++ b/src/include/utils/lsyscache.h
@@ -148,6 +148,7 @@ extern void free_attstatsslot(Oid atttype,
 				  Datum *values, int nvalues,
 				  float4 *numbers, int nnumbers);
 extern char *get_namespace_name(Oid nspid);
+extern Oid get_namespace_oid(const char* npname);
 extern Oid	get_roleid(const char *rolname);
 extern char *get_rolname(Oid roleid);
 extern char get_relation_storage_type(Oid relid);


[02/50] [abbrv] incubator-hawq git commit: HAWQ-1237. Modify hard-coded 'select' privilege in create_ranger_request_json_batch() in rangerrest.c

Posted by es...@apache.org.
HAWQ-1237. Modify hard-coded 'select' privilege in create_ranger_request_json_batch() in rangerrest.c


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/4ca15874
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/4ca15874
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/4ca15874

Branch: refs/heads/2.1.0.0-incubating
Commit: 4ca158740fad957d5ffde0390757f64744eeefbe
Parents: eed9a0f
Author: Chunling Wang <wa...@126.com>
Authored: Tue Dec 27 10:11:13 2016 +0800
Committer: hzhang2 <zh...@163.com>
Committed: Wed Dec 28 17:12:42 2016 +0800

----------------------------------------------------------------------
 src/backend/libpq/rangerrest.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)
----------------------------------------------------------------------
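
A small standalone sketch of the corrected behaviour shown in the diff below, built with json-c (the library rangerrest.c uses): the "privileges" array is filled from the requested actions instead of a hard-coded "select". The plain C array stands in for the backend's List of actions:

#include <stdio.h>
#include <json-c/json.h>          /* json-c header; install path may differ */

int main(void)
{
    /* Stand-in for the backend's List of requested actions. */
    const char *actions[] = {"usage", "create"};

    json_object *jelement = json_object_new_object();
    json_object *jactions = json_object_new_array();

    /* Add every requested action instead of a hard-coded "select". */
    for (size_t i = 0; i < sizeof(actions) / sizeof(actions[0]); i++)
        json_object_array_add(jactions, json_object_new_string(actions[i]));

    json_object_object_add(jelement, "privileges", jactions);
    printf("%s\n", json_object_to_json_string(jelement));
    /* prints something like: { "privileges": [ "usage", "create" ] } */

    json_object_put(jelement);    /* also releases jactions, now owned by jelement */
    return 0;
}
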


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/4ca15874/src/backend/libpq/rangerrest.c
----------------------------------------------------------------------
diff --git a/src/backend/libpq/rangerrest.c b/src/backend/libpq/rangerrest.c
index 59b33a8..120f64f 100644
--- a/src/backend/libpq/rangerrest.c
+++ b/src/backend/libpq/rangerrest.c
@@ -200,14 +200,12 @@ json_object *create_ranger_request_json_batch(List *args)
 
 		json_object_object_add(jelement, "resource", jresource);
 
-		//ListCell *cell;
-		//foreach(cell, arg_ptr->actions)
-		//{
-		char tmp[7] = "select";
-		json_object* jaction = json_object_new_string((char *)tmp);
-		//json_object* jaction = json_object_new_string((char *)cell->data.ptr_value);
-		json_object_array_add(jactions, jaction);
-		//}
+		ListCell *cell;
+		foreach(cell, arg_ptr->actions)
+		{
+		    json_object* jaction = json_object_new_string((char *)cell->data.ptr_value);
+		    json_object_array_add(jactions, jaction);
+		}
 		json_object_object_add(jelement, "privileges", jactions);
 		json_object_array_add(jaccess, jelement);
 


[41/50] [abbrv] incubator-hawq git commit: HAWQ-1281. Refactored RPS integration tests

Posted by es...@apache.org.
HAWQ-1281. Refactored RPS integration tests

(closes #1100)


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/8a5e65bf
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/8a5e65bf
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/8a5e65bf

Branch: refs/heads/2.1.0.0-incubating
Commit: 8a5e65bffc52f6c36c0813ecd1825631fac3acba
Parents: eb2ea90
Author: Alexander Denissov <ad...@pivotal.io>
Authored: Fri Jan 20 11:19:40 2017 -0800
Committer: Alexander Denissov <ad...@pivotal.io>
Committed: Wed Jan 25 09:56:05 2017 -0800

----------------------------------------------------------------------
 .../integration/service/tests/DatabaseTest.java |  66 ++++----
 .../integration/service/tests/FunctionTest.java | 111 +++++++------
 .../integration/service/tests/LanguageTest.java |  96 +++++------
 .../integration/service/tests/ProtocolTest.java |  61 +++----
 .../integration/service/tests/RPSRequest.java   |  60 -------
 .../integration/service/tests/RPSResponse.java  |  42 -----
 .../integration/service/tests/SchemaTest.java   |  95 +++++++++++
 .../integration/service/tests/SequenceTest.java |  97 +++++++++++
 .../service/tests/ServiceBaseTest.java          | 116 -------------
 .../service/tests/SpecialPrivilegesTest.java    |  95 +++++++++++
 .../integration/service/tests/TableTest.java    |  96 +++++++++++
 .../service/tests/TablespaceTest.java           |  61 +++----
 .../ranger/integration/service/tests/Utils.java |  76 ---------
 .../tests/common/ComplexResourceTestBase.java   |  40 +++++
 .../service/tests/common/Policy.java            | 107 ++++++++++++
 .../service/tests/common/RESTClient.java        | 114 +++++++++++++
 .../service/tests/common/ServiceTestBase.java   | 162 +++++++++++++++++++
 .../tests/common/SimpleResourceTestBase.java    | 114 +++++++++++++
 .../src/test/resources/test-database.json       |  46 ------
 .../src/test/resources/test-function-2.json     |  40 -----
 .../src/test/resources/test-function.json       |  40 -----
 .../src/test/resources/test-language-2.json     |  35 ----
 .../src/test/resources/test-language.json       |  35 ----
 .../src/test/resources/test-protocol.json       |  33 ----
 .../src/test/resources/test-tablespace.json     |  30 ----
 25 files changed, 1107 insertions(+), 761 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8a5e65bf/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/DatabaseTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/DatabaseTest.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/DatabaseTest.java
index 451a289..1e6557f 100644
--- a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/DatabaseTest.java
+++ b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/DatabaseTest.java
@@ -19,49 +19,43 @@
 
 package org.apache.hawq.ranger.integration.service.tests;
 
-import org.junit.Test;
+import org.apache.hawq.ranger.integration.service.tests.common.Policy;
+import org.apache.hawq.ranger.integration.service.tests.common.SimpleResourceTestBase;
+import org.junit.Before;
 
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.List;
+import static org.apache.hawq.ranger.integration.service.tests.common.Policy.ResourceType.*;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+public class DatabaseTest extends SimpleResourceTestBase {
 
-public class DatabaseTest extends ServiceBaseTest {
+    // create-schema will be requested by HAWQ with only database in context, so it looks like a privilege for database resource
+    private static final String[] SPECIAL_PRIVILEGES = new String[] {"connect", "temp", "create-schema"};
 
-    private static final List<String> PRIVILEGES = Arrays.asList("connect", "temp");
-
-    public void beforeTest()
-            throws IOException {
-        createPolicy("test-database.json");
-        resources.put("database", "sirotan");
-    }
-
-    @Test
-    public void testDatabases_UserMaria_SirotanDb_Allowed()
-            throws IOException {
-        assertTrue(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    @Before
+    public void beforeTest() {
+        specificResource.put(database, TEST_DB);
+        unknownResource.put(database, UNKNOWN);
+        privileges = new String[] {"connect", "temp", "create"};
     }
 
-    @Test
-    public void testDatabases_UserMaria_DoesNotExistDb_Denied()
-            throws IOException {
-        resources.put("database", "doesnotexist");
-        assertFalse(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    @Override
+    protected Policy getResourceUserPolicy() {
+        Policy policy = policyBuilder
+                .resource(database, TEST_DB)
+                .resource(schema, STAR)
+                .resource(table, STAR)
+                .userAccess(TEST_USER, SPECIAL_PRIVILEGES)
+                .build();
+        return policy;
     }
 
-    @Test
-    public void testDatabases_UserBob_SirotanDb_Denied()
-            throws IOException {
-        assertFalse(hasAccess("bob", resources, PRIVILEGES));
+    @Override
+    protected Policy getResourceGroupPolicy() {
+        Policy policy = policyBuilder
+                .resource(database, TEST_DB)
+                .resource(schema, STAR)
+                .resource(table, STAR)
+                .groupAccess(PUBLIC_GROUP, SPECIAL_PRIVILEGES)
+                .build();
+        return policy;
     }
-
-    @Test
-    public void testDatabases_UserMaria_SirotanDb_Denied()
-            throws IOException {
-        deletePolicy();
-        assertFalse(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
-    }
-
 }

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8a5e65bf/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/FunctionTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/FunctionTest.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/FunctionTest.java
index 1253c38..ecdb67b 100644
--- a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/FunctionTest.java
+++ b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/FunctionTest.java
@@ -19,73 +19,78 @@
 
 package org.apache.hawq.ranger.integration.service.tests;
 
-import org.junit.Test;
+import org.apache.hawq.ranger.integration.service.tests.common.ComplexResourceTestBase;
+import org.apache.hawq.ranger.integration.service.tests.common.Policy;
+import org.junit.Before;
 
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.List;
+import static org.apache.hawq.ranger.integration.service.tests.common.Policy.ResourceType.*;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+public class FunctionTest extends ComplexResourceTestBase {
 
-public class FunctionTest extends ServiceBaseTest {
+    @Before
+    public void beforeTest() {
+        specificResource.put(database, TEST_DB);
+        specificResource.put(schema, TEST_SCHEMA);
+        specificResource.put(function, TEST_FUNCTION);
 
-    private static final List<String> PRIVILEGES = Arrays.asList("execute");
+        parentUnknownResource.put(database, TEST_DB);
+        parentUnknownResource.put(schema, UNKNOWN);
+        parentUnknownResource.put(function, TEST_FUNCTION);
 
-    public void beforeTest()
-            throws IOException {
-        createPolicy("test-function.json");
-        resources.put("database", "sirotan");
-        resources.put("schema", "siroschema");
-        resources.put("function", "atan");
-    }
+        childUnknownResource.put(database, TEST_DB);
+        childUnknownResource.put(schema, TEST_SCHEMA);
+        childUnknownResource.put(function, UNKNOWN);
 
-    @Test
-    public void testFunctions_UserMaria_SirotanDb_AtanFunction_Allowed()
-            throws IOException {
-        assertTrue(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
-    }
+        unknownResource.put(database, UNKNOWN);
+        unknownResource.put(schema, UNKNOWN);
+        unknownResource.put(function, UNKNOWN);
 
-    @Test
-    public void testFunctions_UserMaria_OtherDb_AtanFunction_Denied()
-            throws IOException {
-        resources.put("database", "other");
-        assertFalse(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+        privileges = new String[] {"execute"};
     }
 
-    @Test
-    public void testFunctions_UserMaria_SirotanDb_DoesNotExistFunction_Denied()
-            throws IOException {
-        resources.put("function", "doesnotexist");
-        assertFalse(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    @Override
+    protected Policy getResourceUserPolicy() {
+        Policy policy = policyBuilder
+                .resource(database, TEST_DB)
+                .resource(schema, TEST_SCHEMA)
+                .resource(function, TEST_FUNCTION)
+                .userAccess(TEST_USER, privileges)
+                .build();
+        return policy;
     }
 
-    @Test
-    public void testFunctions_UserBob_SirotanDb_AtanFunction_Denied()
-            throws IOException {
-        assertFalse(hasAccess("bob", resources, PRIVILEGES));
+    @Override
+    protected Policy getResourceParentStarUserPolicy() {
+        Policy policy = policyBuilder
+                .resource(database, TEST_DB)
+                .resource(schema, STAR)
+                .resource(function, TEST_FUNCTION)
+                .userAccess(TEST_USER, privileges)
+                .build();
+        policy.isParentStar = true;
+        return policy;
     }
 
-    @Test
-    public void testFunctions_UserMaria_SirotanDb_AtanFunction_Denied()
-            throws IOException {
-        deletePolicy();
-        assertFalse(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    @Override
+    protected Policy getResourceChildStarUserPolicy() {
+        Policy policy = policyBuilder
+                .resource(database, TEST_DB)
+                .resource(schema, TEST_SCHEMA)
+                .resource(function, STAR)
+                .userAccess(TEST_USER, privileges)
+                .build();
+        policy.isChildStar = true;
+        return policy;
     }
 
-    @Test
-    public void testFunctions_UserMaria_DoesNotExistDb_AtanFunction_Denied()
-            throws IOException {
-        resources.put("database", "doesnotexist");
-        assertFalse(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    @Override
+    protected Policy getResourceGroupPolicy() {
+        Policy policy = policyBuilder
+                .resource(database, TEST_DB)
+                .resource(schema, TEST_SCHEMA)
+                .resource(function, TEST_FUNCTION)
+                .groupAccess(PUBLIC_GROUP, privileges)
+                .build();
+        return policy;
     }
-
-    @Test
-    public void testFunctions_UserMaria_SirotanDb_AtanFunction_Policy2_Allowed()
-            throws IOException {
-        deletePolicy();
-        createPolicy("test-function-2.json");
-        assertTrue(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
-    }
-
 }

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8a5e65bf/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/LanguageTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/LanguageTest.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/LanguageTest.java
index 6eedb08..d39a595 100644
--- a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/LanguageTest.java
+++ b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/LanguageTest.java
@@ -19,65 +19,71 @@
 
 package org.apache.hawq.ranger.integration.service.tests;
 
-import org.junit.Test;
+import org.apache.hawq.ranger.integration.service.tests.common.ComplexResourceTestBase;
+import org.apache.hawq.ranger.integration.service.tests.common.Policy;
+import org.junit.Before;
 
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.List;
+import static org.apache.hawq.ranger.integration.service.tests.common.Policy.ResourceType.database;
+import static org.apache.hawq.ranger.integration.service.tests.common.Policy.ResourceType.language;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+public class LanguageTest extends ComplexResourceTestBase {
 
-public class LanguageTest extends ServiceBaseTest {
+    @Before
+    public void beforeTest() {
+        specificResource.put(database, TEST_DB);
+        specificResource.put(language, TEST_LANGUAGE);
 
-    private static final List<String> PRIVILEGES = Arrays.asList("usage");
+        parentUnknownResource.put(database, UNKNOWN);
+        parentUnknownResource.put(language, TEST_LANGUAGE);
 
-    public void beforeTest()
-            throws IOException {
-        createPolicy("test-language.json");
-        resources.put("database", "sirotan");
-        resources.put("language", "sql");
-    }
+        childUnknownResource.put(database, TEST_DB);
+        childUnknownResource.put(language, UNKNOWN);
 
-    @Test
-    public void testLanguages_UserMaria_SirotanDb_SqlLanguage_Allowed()
-            throws IOException {
-        assertTrue(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
-    }
+        unknownResource.put(database, UNKNOWN);
+        unknownResource.put(language, UNKNOWN);
 
-    @Test
-    public void testLanguages_UserMaria_SirotanDb_DoesNotExistLanguage_Denied()
-            throws IOException {
-        resources.put("language", "doesnotexist");
-        assertFalse(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+        privileges = new String[] {"usage"};
     }
 
-    @Test
-    public void testLanguages_UserBob_SirotanDb_SqlLanguage_Denied()
-            throws IOException {
-        assertFalse(hasAccess("bob", resources, PRIVILEGES));
+    @Override
+    protected Policy getResourceUserPolicy() {
+        Policy policy = policyBuilder
+                .resource(database, TEST_DB)
+                .resource(language, TEST_LANGUAGE)
+                .userAccess(TEST_USER, privileges)
+                .build();
+        return policy;
     }
 
-    @Test
-    public void testLanguages_UserMaria_SirotanDb_SqlLanguage_Denied()
-            throws IOException {
-        deletePolicy();
-        assertFalse(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    @Override
+    protected Policy getResourceParentStarUserPolicy() {
+        Policy policy = policyBuilder
+                .resource(database, STAR)
+                .resource(language, TEST_LANGUAGE)
+                .userAccess(TEST_USER, privileges)
+                .build();
+        policy.isParentStar = true;
+        return policy;
     }
 
-    @Test
-    public void testLanguages_UserMaria_DoesNotExistDb_SqlLanguage_Denied()
-            throws IOException {
-        resources.put("database", "doesnotexist");
-        assertFalse(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    @Override
+    protected Policy getResourceChildStarUserPolicy() {
+        Policy policy = policyBuilder
+                .resource(database, TEST_DB)
+                .resource(language, STAR)
+                .userAccess(TEST_USER, privileges)
+                .build();
+        policy.isChildStar = true;
+        return policy;
     }
 
-    @Test
-    public void testLanguages_UserMaria_SirotanDb_SqlLanguage_Policy2_Allowed()
-            throws IOException {
-        deletePolicy();
-        createPolicy("test-language-2.json");
-        assertTrue(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    @Override
+    protected Policy getResourceGroupPolicy() {
+        Policy policy = policyBuilder
+                .resource(database, TEST_DB)
+                .resource(language, TEST_LANGUAGE)
+                .groupAccess(PUBLIC_GROUP, privileges)
+                .build();
+        return policy;
     }
-
 }

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8a5e65bf/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/ProtocolTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/ProtocolTest.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/ProtocolTest.java
index f0e5c99..e67a0d3 100644
--- a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/ProtocolTest.java
+++ b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/ProtocolTest.java
@@ -19,49 +19,36 @@
 
 package org.apache.hawq.ranger.integration.service.tests;
 
-import org.junit.Test;
+import org.apache.hawq.ranger.integration.service.tests.common.Policy;
+import org.apache.hawq.ranger.integration.service.tests.common.SimpleResourceTestBase;
+import org.junit.Before;
 
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.List;
+import static org.apache.hawq.ranger.integration.service.tests.common.Policy.ResourceType.protocol;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+public class ProtocolTest extends SimpleResourceTestBase {
 
-public class ProtocolTest extends ServiceBaseTest {
-
-    private static final List<String> PRIVILEGES = Arrays.asList("select", "insert");
-
-    public void beforeTest()
-            throws IOException {
-        createPolicy("test-protocol.json");
-        resources.put("protocol", "pxf");
-    }
-
-    @Test
-    public void testProtocols_UserMaria_PxfProtocol_Allowed()
-            throws IOException {
-        assertTrue(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    @Before
+    public void beforeTest() {
+        specificResource.put(protocol, TEST_PROTOCOL);
+        unknownResource.put(protocol, UNKNOWN);
+        privileges = new String[] {"select", "insert"};
     }
 
-    @Test
-    public void testProtocols_UserMaria_DoesNotExistProtocol_Denied()
-            throws IOException {
-        resources.put("protocol", "doesnotexist");
-        assertFalse(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    @Override
+    protected Policy getResourceUserPolicy() {
+        Policy policy = policyBuilder
+                .resource(protocol, TEST_PROTOCOL)
+                .userAccess(TEST_USER, privileges)
+                .build();
+        return policy;
     }
 
-    @Test
-    public void testProtocols_UserBob_PxfProtocol_Denied()
-            throws IOException {
-        assertFalse(hasAccess("bob", resources, PRIVILEGES));
+    @Override
+    protected Policy getResourceGroupPolicy() {
+        Policy policy = policyBuilder
+                .resource(protocol, TEST_PROTOCOL)
+                .groupAccess(PUBLIC_GROUP, privileges)
+                .build();
+        return policy;
     }
-
-    @Test
-    public void testProtocols_UserMaria_PxfProtocol_Denied()
-            throws IOException {
-        deletePolicy();
-        assertFalse(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
-    }
-
 }

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8a5e65bf/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/RPSRequest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/RPSRequest.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/RPSRequest.java
deleted file mode 100644
index 7e7787a..0000000
--- a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/RPSRequest.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.hawq.ranger.integration.service.tests;
-
-import org.codehaus.jackson.map.ObjectMapper;
-
-import java.io.IOException;
-
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-public class RPSRequest {
-
-    String user;
-    Map<String, String> resources;
-    List<String> privileges;
-
-    public RPSRequest(String user,
-                      Map<String, String> resources,
-                      List<String> privileges) {
-        this.user = user;
-        this.resources = resources;
-        this.privileges = privileges;
-    }
-
-    public String getJsonString()
-            throws IOException {
-
-        Map<String, Object> request = new HashMap<>();
-        request.put("requestId", 9);
-        request.put("user", user);
-        request.put("clientIp", "123.0.0.21");
-        request.put("context", "CREATE DATABASE sirotan;");
-        Map<String, Object> accessHash = new HashMap<>();
-        accessHash.put("resource", resources);
-        accessHash.put("privileges", privileges);
-        request.put("access", Arrays.asList(accessHash));
-        return new ObjectMapper().writeValueAsString(request);
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8a5e65bf/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/RPSResponse.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/RPSResponse.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/RPSResponse.java
deleted file mode 100644
index 2ed1046..0000000
--- a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/RPSResponse.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.hawq.ranger.integration.service.tests;
-
-import org.codehaus.jackson.annotate.JsonProperty;
-
-import java.util.List;
-import java.util.Map;
-
-public class RPSResponse {
-
-    @JsonProperty
-    public int requestId;
-
-    @JsonProperty
-    public List<Map<String, Object>> access;
-
-    public List<Map<String, Object>> getAccess() {
-        return access;
-    }
-
-    public boolean hasAccess() {
-        return (boolean) access.get(0).get("allowed");
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8a5e65bf/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/SchemaTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/SchemaTest.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/SchemaTest.java
new file mode 100644
index 0000000..b3dff37
--- /dev/null
+++ b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/SchemaTest.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.service.tests;
+
+import org.apache.hawq.ranger.integration.service.tests.common.ComplexResourceTestBase;
+import org.apache.hawq.ranger.integration.service.tests.common.Policy;
+import org.junit.Before;
+
+import static org.apache.hawq.ranger.integration.service.tests.common.Policy.ResourceType.*;
+
+public class SchemaTest extends ComplexResourceTestBase {
+
+    // for schema only, privileges in policy must have -schema suffix added, create-schema is covered as part of DatabaseTest
+    private static final String[] SPECIAL_PRIVILEGES = new String[] {"usage-schema"};
+
+    @Before
+    public void beforeTest() {
+        specificResource.put(database, TEST_DB);
+        specificResource.put(schema, TEST_SCHEMA);
+
+        parentUnknownResource.put(database, UNKNOWN);
+        parentUnknownResource.put(schema, TEST_SCHEMA);
+
+        childUnknownResource.put(database, TEST_DB);
+        childUnknownResource.put(schema, UNKNOWN);
+
+        unknownResource.put(database, UNKNOWN);
+        unknownResource.put(schema, UNKNOWN);
+
+        privileges = new String[] {"usage"};
+    }
+
+    @Override
+    protected Policy getResourceUserPolicy() {
+        Policy policy = policyBuilder
+                .resource(database, TEST_DB)
+                .resource(schema, TEST_SCHEMA)
+                .resource(table, STAR)
+                .userAccess(TEST_USER, SPECIAL_PRIVILEGES)
+                .build();
+        return policy;
+    }
+
+    @Override
+    protected Policy getResourceParentStarUserPolicy() {
+        Policy policy = policyBuilder
+                .resource(database, STAR)
+                .resource(schema, TEST_SCHEMA)
+                .resource(table, STAR)
+                .userAccess(TEST_USER, SPECIAL_PRIVILEGES)
+                .build();
+        policy.isParentStar = true;
+        return policy;
+    }
+
+    @Override
+    protected Policy getResourceChildStarUserPolicy() {
+        Policy policy = policyBuilder
+                .resource(database, TEST_DB)
+                .resource(schema, STAR)
+                .resource(table, STAR)
+                .userAccess(TEST_USER, SPECIAL_PRIVILEGES)
+                .build();
+        policy.isChildStar = true;
+        return policy;
+    }
+
+    @Override
+    protected Policy getResourceGroupPolicy() {
+        Policy policy = policyBuilder
+                .resource(database, TEST_DB)
+                .resource(schema, TEST_SCHEMA)
+                .resource(table, STAR)
+                .groupAccess(PUBLIC_GROUP, SPECIAL_PRIVILEGES)
+                .build();
+        return policy;
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8a5e65bf/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/SequenceTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/SequenceTest.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/SequenceTest.java
new file mode 100644
index 0000000..5add94c
--- /dev/null
+++ b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/SequenceTest.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.service.tests;
+
+import org.apache.hawq.ranger.integration.service.tests.common.ComplexResourceTestBase;
+import org.apache.hawq.ranger.integration.service.tests.common.Policy;
+import org.junit.Before;
+
+import static org.apache.hawq.ranger.integration.service.tests.common.Policy.ResourceType.*;
+
+public class SequenceTest extends ComplexResourceTestBase {
+
+    @Before
+    public void beforeTest() {
+        specificResource.put(database, TEST_DB);
+        specificResource.put(schema, TEST_SCHEMA);
+        specificResource.put(sequence, TEST_SEQUENCE);
+
+        parentUnknownResource.put(database, TEST_DB);
+        parentUnknownResource.put(schema, UNKNOWN);
+        parentUnknownResource.put(sequence, TEST_SEQUENCE);
+
+        childUnknownResource.put(database, TEST_DB);
+        childUnknownResource.put(schema, TEST_SCHEMA);
+        childUnknownResource.put(sequence, UNKNOWN);
+
+        unknownResource.put(database, UNKNOWN);
+        unknownResource.put(schema, UNKNOWN);
+        unknownResource.put(sequence, UNKNOWN);
+
+        privileges = new String[] {"select", "update", "usage"};
+    }
+
+    @Override
+    protected Policy getResourceUserPolicy() {
+        Policy policy = policyBuilder
+                .resource(database, TEST_DB)
+                .resource(schema, TEST_SCHEMA)
+                .resource(sequence, TEST_SEQUENCE)
+                .userAccess(TEST_USER, privileges)
+                .build();
+        return policy;
+    }
+
+    @Override
+    protected Policy getResourceParentStarUserPolicy() {
+        Policy policy = policyBuilder
+                .resource(database, TEST_DB)
+                .resource(schema, STAR)
+                .resource(sequence, TEST_SEQUENCE)
+                .userAccess(TEST_USER, privileges)
+                .build();
+        policy.isParentStar = true;
+        return policy;
+    }
+
+    @Override
+    protected Policy getResourceChildStarUserPolicy() {
+        Policy policy = policyBuilder
+                .resource(database, TEST_DB)
+                .resource(schema, TEST_SCHEMA)
+                .resource(sequence, STAR)
+                .userAccess(TEST_USER, privileges)
+                .build();
+        policy.isChildStar = true;
+        return policy;
+    }
+
+    @Override
+    protected Policy getResourceGroupPolicy() {
+        Policy policy = policyBuilder
+                .resource(database, TEST_DB)
+                .resource(schema, TEST_SCHEMA)
+                .resource(sequence, TEST_SEQUENCE)
+                .groupAccess(PUBLIC_GROUP, privileges)
+                .build();
+        return policy;
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8a5e65bf/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/ServiceBaseTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/ServiceBaseTest.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/ServiceBaseTest.java
deleted file mode 100644
index 8608584..0000000
--- a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/ServiceBaseTest.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.hawq.ranger.integration.service.tests;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.http.client.methods.HttpDelete;
-import org.apache.http.client.methods.HttpPost;
-import org.apache.http.entity.StringEntity;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.rules.TestName;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-public abstract class ServiceBaseTest {
-
-    protected final Log log = LogFactory.getLog(this.getClass());
-
-    @Rule
-    public final TestName testName = new TestName();
-    protected final String policyName = getClass().getSimpleName();
-    protected Map<String, String> resources = new HashMap<>();
-
-    public static String RANGER_PLUGIN_SERVICE_HOST = "localhost";
-    public static String RANGER_PLUGIN_SERVICE_PORT = "8432";
-    public static String RANGER_PLUGIN_SERVICE_URL =
-        "http://" + RANGER_PLUGIN_SERVICE_HOST + ":" + RANGER_PLUGIN_SERVICE_PORT + "/rps";
-    public static String RANGER_ADMIN_HOST = "localhost";
-    public static String RANGER_ADMIN_PORT = "6080";
-    public static String RANGER_URL =
-        "http://" + RANGER_ADMIN_HOST + ":" + RANGER_ADMIN_PORT + "/service/public/v2/api";
-    public static String RANGER_TEST_USER = "maria_dev";
-    public static int    POLICY_REFRESH_INTERVAL = 6000;
-
-    @Before
-    public void setUp()
-            throws IOException {
-        log.info("======================================================================================");
-        log.info("Running test " + testName.getMethodName());
-        log.info("======================================================================================");
-        beforeTest();
-    }
-
-    @After
-    public void tearDown()
-            throws IOException {
-        deletePolicy();
-    }
-
-    protected void createPolicy(String jsonFile)
-            throws IOException {
-
-        log.info("Creating policy " + policyName);
-        HttpPost httpPost = new HttpPost(RANGER_URL + "/policy");
-        httpPost.setEntity(new StringEntity(Utils.getPayload(jsonFile)));
-        Utils.processHttpRequest(httpPost);
-        waitForPolicyRefresh();
-    }
-
-    protected void deletePolicy()
-            throws IOException {
-
-        log.info("Deleting policy " + policyName);
-        String requestUrl = RANGER_URL + "/policy?servicename=hawq&policyname=" + policyName;
-        Utils.processHttpRequest(new HttpDelete(requestUrl));
-        waitForPolicyRefresh();
-    }
-
-    protected boolean hasAccess(String user,
-                                Map<String, String> resources,
-                                List<String> privileges)
-            throws IOException {
-
-        log.info("Checking access for user " + user);
-        RPSRequest request = new RPSRequest(user, resources, privileges);
-        HttpPost httpPost = new HttpPost(RANGER_PLUGIN_SERVICE_URL);
-        httpPost.setEntity(new StringEntity(request.getJsonString()));
-        String result = Utils.processHttpRequest(httpPost);
-        RPSResponse rpsResponse = Utils.getResponse(result);
-        return rpsResponse.hasAccess();
-    }
-
-    private void waitForPolicyRefresh() {
-
-        try {
-            Thread.sleep(POLICY_REFRESH_INTERVAL);
-        }
-        catch (InterruptedException e) {
-            log.error(e);
-        }
-    }
-
-    public abstract void beforeTest() throws IOException;
-}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8a5e65bf/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/SpecialPrivilegesTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/SpecialPrivilegesTest.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/SpecialPrivilegesTest.java
new file mode 100644
index 0000000..ac1727f
--- /dev/null
+++ b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/SpecialPrivilegesTest.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.service.tests;
+
+import org.apache.hawq.ranger.integration.service.tests.common.Policy;
+import org.apache.hawq.ranger.integration.service.tests.common.ServiceTestBase;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.apache.hawq.ranger.integration.service.tests.common.Policy.ResourceType.*;
+
+public class SpecialPrivilegesTest extends ServiceTestBase {
+
+    private final String[] privilegeUsageSchema = new String[] {"usage-schema"};
+    private final String[] privilegeUsage = new String[] {"usage"};
+
+
+    private Map<Policy.ResourceType, String> schemaResource;
+    private Map<Policy.ResourceType, String> sequenceResource;
+
+    @Before
+    public void beforeTest() {
+        // resource used for lookup from RPS
+        schemaResource = new HashMap<>();
+        schemaResource.put(database, TEST_DB);
+        schemaResource.put(schema, TEST_SCHEMA);
+
+        sequenceResource = new HashMap<>();
+        sequenceResource.put(database, TEST_DB);
+        sequenceResource.put(schema, TEST_SCHEMA);
+        sequenceResource.put(sequence, TEST_SEQUENCE);
+    }
+
+
+    @Test
+    public void testUsageSchemaPrivilege() throws IOException {
+        // define policy for "usage-schema" on db:schema:*
+        Policy policy = policyBuilder
+                .resource(database, TEST_DB)
+                .resource(schema, TEST_SCHEMA)
+                .resource(sequence, STAR)
+                .userAccess(TEST_USER, privilegeUsageSchema)
+                .build();
+        createPolicy(policy);
+        try {
+            // user should have access to usage on schema
+            checkUserHasResourceAccess(TEST_USER, schemaResource, privilegeUsage);
+            // user should have NO access to usage on sequence
+            checkUserDeniedResourceAccess(TEST_USER, sequenceResource, privilegeUsage);
+        } finally {
+            deletePolicy(policy);
+        }
+    }
+
+    @Test
+    public void testUsagePrivilege() throws IOException {
+        // define policy for "usage" on db:schema:*
+        Policy policy = policyBuilder
+                .resource(database, TEST_DB)
+                .resource(schema, TEST_SCHEMA)
+                .resource(sequence, STAR)
+                .userAccess(TEST_USER, privilegeUsage)
+                .build();
+        createPolicy(policy);
+        try {
+            // user should have NO access to usage on schema
+            checkUserDeniedResourceAccess(TEST_USER, schemaResource, privilegeUsage);
+            // user should have access to usage on sequence
+            checkUserHasResourceAccess(TEST_USER, sequenceResource, privilegeUsage);
+        } finally {
+            deletePolicy(policy);
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8a5e65bf/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/TableTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/TableTest.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/TableTest.java
new file mode 100644
index 0000000..742b91c
--- /dev/null
+++ b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/TableTest.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.service.tests;
+
+import org.apache.hawq.ranger.integration.service.tests.common.ComplexResourceTestBase;
+import org.apache.hawq.ranger.integration.service.tests.common.Policy;
+import org.junit.Before;
+
+import static org.apache.hawq.ranger.integration.service.tests.common.Policy.ResourceType.*;
+
+public class TableTest extends ComplexResourceTestBase {
+
+    @Before
+    public void beforeTest() {
+        specificResource.put(database, TEST_DB);
+        specificResource.put(schema, TEST_SCHEMA);
+        specificResource.put(table, TEST_TABLE);
+
+        parentUnknownResource.put(database, TEST_DB);
+        parentUnknownResource.put(schema, UNKNOWN);
+        parentUnknownResource.put(table, TEST_TABLE);
+
+        childUnknownResource.put(database, TEST_DB);
+        childUnknownResource.put(schema, TEST_SCHEMA);
+        childUnknownResource.put(table, UNKNOWN);
+
+        unknownResource.put(database, UNKNOWN);
+        unknownResource.put(schema, UNKNOWN);
+        unknownResource.put(table, UNKNOWN);
+
+        privileges = new String[] {"select", "insert", "update", "delete", "references"};
+    }
+
+    @Override
+    protected Policy getResourceUserPolicy() {
+        Policy policy = policyBuilder
+                .resource(database, TEST_DB)
+                .resource(schema, TEST_SCHEMA)
+                .resource(table, TEST_TABLE)
+                .userAccess(TEST_USER, privileges)
+                .build();
+        return policy;
+    }
+
+    @Override
+    protected Policy getResourceParentStarUserPolicy() {
+        Policy policy = policyBuilder
+                .resource(database, TEST_DB)
+                .resource(schema, STAR)
+                .resource(table, TEST_TABLE)
+                .userAccess(TEST_USER, privileges)
+                .build();
+        policy.isParentStar = true;
+        return policy;
+    }
+
+    @Override
+    protected Policy getResourceChildStarUserPolicy() {
+        Policy policy = policyBuilder
+                .resource(database, TEST_DB)
+                .resource(schema, TEST_SCHEMA)
+                .resource(table, STAR)
+                .userAccess(TEST_USER, privileges)
+                .build();
+        policy.isChildStar = true;
+        return policy;
+    }
+
+    @Override
+    protected Policy getResourceGroupPolicy() {
+        Policy policy = policyBuilder
+                .resource(database, TEST_DB)
+                .resource(schema, TEST_SCHEMA)
+                .resource(table, TEST_TABLE)
+                .groupAccess(PUBLIC_GROUP, privileges)
+                .build();
+        return policy;
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8a5e65bf/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/TablespaceTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/TablespaceTest.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/TablespaceTest.java
index cfc41cb..f8834b5 100644
--- a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/TablespaceTest.java
+++ b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/TablespaceTest.java
@@ -19,49 +19,36 @@
 
 package org.apache.hawq.ranger.integration.service.tests;
 
-import org.junit.Test;
+import org.apache.hawq.ranger.integration.service.tests.common.Policy;
+import org.apache.hawq.ranger.integration.service.tests.common.SimpleResourceTestBase;
+import org.junit.Before;
 
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.List;
+import static org.apache.hawq.ranger.integration.service.tests.common.Policy.ResourceType.tablespace;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+public class TablespaceTest extends SimpleResourceTestBase {
 
-public class TablespaceTest extends ServiceBaseTest {
-
-    private static final List<String> PRIVILEGES = Arrays.asList("create");
-
-    public void beforeTest()
-            throws IOException {
-        createPolicy("test-tablespace.json");
-        resources.put("tablespace", "pg_global");
-    }
-
-    @Test
-    public void testTablespaces_UserMaria_PgGlobalTablespace_Allowed()
-            throws IOException {
-        assertTrue(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    @Before
+    public void beforeTest() {
+        specificResource.put(tablespace, TEST_TABLESPACE);
+        unknownResource.put(tablespace, UNKNOWN);
+        privileges = new String[] {"create"};
     }
 
-    @Test
-    public void testTablespaces_UserMaria_DoesNotExistTablespace_Denied()
-            throws IOException {
-        resources.put("tablespace", "doesnotexist");
-        assertFalse(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    @Override
+    protected Policy getResourceUserPolicy() {
+        Policy policy = policyBuilder
+                .resource(tablespace, TEST_TABLESPACE)
+                .userAccess(TEST_USER, privileges)
+                .build();
+        return policy;
     }
 
-    @Test
-    public void testTablespaces_UserBob_PgGlobalTablespace_Denied()
-            throws IOException {
-        assertFalse(hasAccess("bob", resources, PRIVILEGES));
+    @Override
+    protected Policy getResourceGroupPolicy() {
+        Policy policy = policyBuilder
+                .resource(tablespace, TEST_TABLESPACE)
+                .groupAccess(PUBLIC_GROUP, privileges)
+                .build();
+        return policy;
     }
-
-    @Test
-    public void testTablespaces_UserMaria_PgGlobalTablespace_Denied()
-            throws IOException {
-        deletePolicy();
-        assertFalse(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
-    }
-
 }

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8a5e65bf/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/Utils.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/Utils.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/Utils.java
deleted file mode 100644
index 971e513..0000000
--- a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/Utils.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.hawq.ranger.integration.service.tests;
-
-import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpResponse;
-import org.apache.http.client.HttpClient;
-import org.apache.http.client.methods.HttpRequestBase;
-import org.apache.http.impl.client.HttpClientBuilder;
-import org.codehaus.jackson.map.ObjectMapper;
-
-import java.io.IOException;
-
-public class Utils {
-
-    protected static final Log log = LogFactory.getLog(Utils.class);
-
-    public static String getPayload(String jsonFile)
-            throws IOException {
-        return IOUtils.toString(Utils.class.getClassLoader().getResourceAsStream(jsonFile));
-    }
-
-    public static String getEncoding() {
-        return Base64.encodeBase64String("admin:admin".getBytes());
-    }
-
-    public static String processHttpRequest(HttpRequestBase request)
-            throws IOException {
-
-        if (log.isDebugEnabled()) {
-            log.debug("Request URI = " + request.getURI().toString());
-        }
-        request.setHeader("Authorization", "Basic " + getEncoding());
-        request.setHeader("Content-Type", "application/json");
-        HttpClient httpClient = HttpClientBuilder.create().build();
-        HttpResponse response = httpClient.execute(request);
-        int responseCode = response.getStatusLine().getStatusCode();
-        log.info("Response Code = " + responseCode);
-        HttpEntity entity = response.getEntity();
-        if (entity != null) {
-            String result = IOUtils.toString(entity.getContent());
-            if (log.isDebugEnabled()) {
-                log.debug(result);
-            }
-            return result;
-        }
-        return null;
-    }
-
-    public static RPSResponse getResponse(String result)
-            throws IOException {
-        return new ObjectMapper().readValue(result, RPSResponse.class);
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8a5e65bf/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/common/ComplexResourceTestBase.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/common/ComplexResourceTestBase.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/common/ComplexResourceTestBase.java
new file mode 100644
index 0000000..f49c18b
--- /dev/null
+++ b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/common/ComplexResourceTestBase.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.service.tests.common;
+
+import org.junit.Test;
+
+import java.io.IOException;
+
+public abstract class ComplexResourceTestBase extends SimpleResourceTestBase {
+
+    @Test
+    public void testParentStarResourceUserPolicy() throws IOException {
+        checkResourceUserPolicy(getResourceParentStarUserPolicy());
+    }
+
+    @Test
+    public void testChildStarResourceUserPolicy() throws IOException {
+        checkResourceUserPolicy(getResourceChildStarUserPolicy());
+    }
+
+    abstract protected Policy getResourceParentStarUserPolicy();
+    abstract protected Policy getResourceChildStarUserPolicy();
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8a5e65bf/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/common/Policy.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/common/Policy.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/common/Policy.java
new file mode 100644
index 0000000..7d8f4b5
--- /dev/null
+++ b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/common/Policy.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.service.tests.common;
+
+import java.util.*;
+
+public class Policy {
+
+    public enum ResourceType {
+        database, schema, table, function, sequence, tablespace, language, protocol;
+    }
+
+    public static class ResourceValue {
+        public Set<String> values = new HashSet<>();
+        public Boolean isExcludes = false;
+        public Boolean isRecursive = false;
+
+        public ResourceValue(String... values) {
+            this.values.addAll(Arrays.asList(values));
+        }
+    }
+
+    public static class Access {
+        public String type;
+        public Boolean isAllowed = true;
+        public Access(String type) {
+            this.type = type;
+        }
+    }
+
+    public static class PolicyItem {
+        public Set<Access> accesses = new HashSet<>();
+        public Set<String> users = new HashSet<>();
+        public Set<String> groups = new HashSet<>();
+        public Set<String> conditions = new HashSet<>();
+        public Boolean delegateAdmin = true;
+        public PolicyItem(String[] privileges) {
+            for (String privilege : privileges) {
+                this.accesses.add(new Access(privilege));
+            }
+        }
+    }
+
+    public Boolean isEnabled = true;
+    public String service = "hawq";
+    public String name;
+    public Integer policyType = 0;
+    public String description = "Test policy";
+    public Boolean isAuditEnabled = true;
+    public Map<ResourceType, ResourceValue> resources = new HashMap<>();
+    public Set<PolicyItem> policyItems = new HashSet<>();
+    public Set<Object> denyPolicyItems = new HashSet<>();
+    public Set<Object> allowExceptions = new HashSet<>();
+    public Set<Object> denyExceptions = new HashSet<>();
+    public Set<Object> dataMaskPolicyItems = new HashSet<>();
+    public Set<Object> rowFilterPolicyItems = new HashSet<>();
+
+    // do not serialize into JSON
+    public transient boolean isParentStar = false;
+    public transient boolean isChildStar = false;
+
+    public static class PolicyBuilder {
+        private Policy policy = new Policy();
+
+        public PolicyBuilder name(String name) {
+            policy.name = name;
+            policy.description = "Test Policy for " + name;
+            return this;
+        }
+        public PolicyBuilder resource(ResourceType type, String value) {
+            policy.resources.put(type, new ResourceValue(value));
+            return this;
+        }
+        public PolicyBuilder userAccess(String user, String... privileges) {
+            PolicyItem policyItem = new PolicyItem(privileges);
+            policyItem.users.add(user);
+            policy.policyItems.add(policyItem);
+            return this;
+        }
+        public PolicyBuilder groupAccess(String group, String... privileges) {
+            PolicyItem policyItem = new PolicyItem(privileges);
+            policyItem.groups.add(group);
+            policy.policyItems.add(policyItem);
+            return this;
+        }
+        public Policy build() {
+            return policy;
+        }
+    }
+}
\ No newline at end of file
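
For orientation, a minimal sketch (not part of the commit) of how the PolicyBuilder above can be used; the resource and user values mirror the constants that ServiceTestBase defines, and the resulting object is what the tests serialize to JSON and post to the Ranger admin API:

    // values mirror ServiceTestBase constants; any other names are purely illustrative
    Policy policy = new Policy.PolicyBuilder()
            .name("TableTest")
            .resource(Policy.ResourceType.database, "test-db")
            .resource(Policy.ResourceType.schema, "test-schema")
            .resource(Policy.ResourceType.table, "*")
            .userAccess("maria_dev", "select", "insert")
            .build();
    // policy.resources now holds one single-value ResourceValue per resource type, and
    // policy.policyItems holds one PolicyItem granting select and insert to maria_dev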

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8a5e65bf/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/common/RESTClient.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/common/RESTClient.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/common/RESTClient.java
new file mode 100644
index 0000000..ee7cb6e
--- /dev/null
+++ b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/common/RESTClient.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.service.tests.common;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpStatus;
+import org.apache.http.client.ClientProtocolException;
+import org.apache.http.client.methods.*;
+import org.apache.http.entity.ContentType;
+import org.apache.http.entity.StringEntity;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClients;
+import org.apache.http.util.EntityUtils;
+
+import java.io.IOException;
+
+
+public class RESTClient {
+
+    private static final Log LOG = LogFactory.getLog(RESTClient.class);
+    private static final String AUTH_HEADER = getAuthorizationHeader();
+
+    private CloseableHttpClient httpClient;
+
+    private static String getAuthorizationHeader() {
+        return "Basic " + Base64.encodeBase64String("admin:admin".getBytes());
+    }
+
+    public RESTClient() {
+        httpClient = HttpClients.createDefault();
+    }
+
+    public String executeRequest(Method method, String url) throws IOException {
+        return executeRequest(method, url, null);
+    }
+
+    public String executeRequest(Method method, String url, String payload) throws IOException {
+        HttpUriRequest request = null;
+        switch (method) {
+            case GET:
+                request = new HttpGet(url);
+                break;
+            case POST:
+                request = new HttpPost(url);
+                ((HttpPost) request).setEntity(new StringEntity(payload));
+                break;
+            case DELETE:
+                request = new HttpDelete(url);
+                break;
+            default:
+                throw new IllegalArgumentException("Method " + method + " is not supported");
+        }
+        return executeRequest(request);
+    }
+
+    private String executeRequest(HttpUriRequest request) throws IOException {
+
+        LOG.debug("--> request URI = " + request.getURI());
+
+        request.setHeader("Authorization", AUTH_HEADER);
+        request.setHeader("Content-Type", ContentType.APPLICATION_JSON.toString());
+
+        CloseableHttpResponse response = httpClient.execute(request);
+        String payload = null;
+        try {
+            int responseCode = response.getStatusLine().getStatusCode();
+            LOG.debug("<-- response code = " + responseCode);
+
+            HttpEntity entity = response.getEntity();
+            if (entity != null) {
+                payload = EntityUtils.toString(response.getEntity());
+            }
+            LOG.debug("<-- response payload = " + payload);
+
+            if (responseCode == HttpStatus.SC_NOT_FOUND) {
+                throw new ResourceNotFoundException();
+            } else if (responseCode >= 300) {
+                throw new ClientProtocolException("Unexpected HTTP response code = " + responseCode);
+            }
+        } finally {
+            response.close();
+        }
+
+        return payload;
+    }
+
+    public static class ResourceNotFoundException extends IOException {
+
+    }
+
+    public enum Method {
+        GET, POST, PUT, DELETE;
+    }
+}
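
A short usage sketch for the client above (illustrative only; the URL matches the Ranger admin endpoint assumed elsewhere in these tests, and policyJson stands in for a serialized Policy):

    RESTClient rest = new RESTClient();
    String rangerPolicyUrl = "http://localhost:6080/service/public/v2/api/policy";
    // create a policy: POST with a JSON body; the client adds the admin:admin basic auth header
    rest.executeRequest(RESTClient.Method.POST, rangerPolicyUrl, policyJson);
    // delete it again; a 404 from Ranger surfaces as ResourceNotFoundException
    try {
        rest.executeRequest(RESTClient.Method.DELETE,
                rangerPolicyUrl + "?servicename=hawq&policyname=TableTest");
    } catch (RESTClient.ResourceNotFoundException e) {
        // the policy was already gone -- safe to ignore in a teardown
    }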

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8a5e65bf/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/common/ServiceTestBase.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/common/ServiceTestBase.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/common/ServiceTestBase.java
new file mode 100644
index 0000000..0b3be56
--- /dev/null
+++ b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/common/ServiceTestBase.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.service.tests.common;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hawq.ranger.integration.service.tests.common.Policy.PolicyBuilder;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.type.TypeReference;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.rules.TestName;
+
+import java.io.IOException;
+import java.util.*;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public abstract class ServiceTestBase {
+
+    protected Log LOG = LogFactory.getLog(this.getClass());
+
+    @Rule
+    public final TestName testName = new TestName();
+
+    protected static final String PUBLIC_GROUP = "public";
+    protected static final String GPADMIN_USER = "gpadmin";
+    protected static final String TEST_USER = "maria_dev";
+    protected static final String UNKNOWN = "unknown";
+    protected static final String STAR = "*";
+
+    protected static final String TEST_DB = "test-db";
+    protected static final String TEST_SCHEMA = "test-schema";
+    protected static final String TEST_TABLE = "test-table";
+    protected static final String TEST_FUNCTION = "test-function";
+    protected static final String TEST_SEQUENCE = "test-sequence";
+    protected static final String TEST_LANGUAGE = "test-language";
+    protected static final String TEST_PROTOCOL = "test-protocol";
+    protected static final String TEST_TABLESPACE = "test-tablespace";
+
+    protected PolicyBuilder policyBuilder;
+
+    private static final String RPS_HOST = "localhost";
+    private static final String RPS_PORT = "8432";
+    private static final String RPS_URL = String.format("http://%s:%s/rps", RPS_HOST, RPS_PORT);
+
+    private static final String RANGER_HOST = "localhost";
+    private static final String RANGER_PORT = "6080";
+    private static final String RANGER_URL = String.format("http://%s:%s/service/public/v2/api", RANGER_HOST, RANGER_PORT);
+    private static final String RANGER_POLICY_URL = RANGER_URL + "/policy";
+
+    private static final int POLICY_REFRESH_INTERVAL = 6000;
+    private static final TypeReference<HashMap<String,Object>> typeMSO = new TypeReference<HashMap<String,Object>>() {};
+
+    private RESTClient rest = new RESTClient();
+    private ObjectMapper mapper = new ObjectMapper();
+
+    @Before
+    public void setUp() throws IOException {
+        LOG.info("======================================================================================");
+        LOG.info("Running test " + testName.getMethodName());
+        LOG.info("======================================================================================");
+
+        policyBuilder = (new PolicyBuilder()).name(getClass().getSimpleName());
+    }
+
+    protected void checkUserHasResourceAccess(String user, Map<Policy.ResourceType, String> resource, String[] privileges) throws IOException {
+        // user IN the policy --> has all possible privileges to the specific resource
+        LOG.debug(String.format("Asserting user %s HAS access %s privileges %s", user, resource, Arrays.toString(privileges)));
+        assertTrue(hasAccess(user, resource, privileges));
+        for (String privilege : privileges) {
+            // user IN the policy --> has individual privileges to the specific resource
+            LOG.debug(String.format("Asserting user %s HAS access %s privilege %s", user, resource, privilege));
+            assertTrue(hasAccess(user, resource, privilege));
+        }
+    }
+
+    protected void checkUserDeniedResourceAccess(String user, Map<Policy.ResourceType, String> resource, String[] privileges) throws IOException {
+        // user should have NO access to the resource with the combined set of privileges
+        LOG.debug(String.format("Asserting user %s HAS NO access %s privileges %s", user, resource, Arrays.toString(privileges)));
+        assertFalse(hasAccess(user, resource, privileges));
+        for (String privilege : privileges) {
+            // user should have NO access to the resource with each individual privilege
+            LOG.debug(String.format("Asserting user %s HAS NO access %s privilege %s", user, resource, privilege));
+            assertFalse(hasAccess(user, resource, privilege));
+        }
+    }
+
+    protected void createPolicy(Policy policy) throws IOException {
+        String policyJson = mapper.writeValueAsString(policy);
+        LOG.info(String.format("Creating policy %s : %s", policy.name, policyJson));
+        rest.executeRequest(RESTClient.Method.POST, RANGER_POLICY_URL, policyJson);
+        waitForPolicyRefresh();
+    }
+
+    protected void deletePolicy(Policy policy) throws IOException {
+        LOG.info("Deleting policy " + policy.name);
+        try {
+            rest.executeRequest(RESTClient.Method.DELETE, getRangerPolicyUrl(policy.name));
+        } catch (RESTClient.ResourceNotFoundException e) {
+            // ignore the error when deleting a policy that does not exist
+        }
+        waitForPolicyRefresh();
+    }
+
+    protected boolean hasAccess(String user, Map<Policy.ResourceType, String> resources, String... privileges) throws IOException {
+        LOG.info("Checking access for user " + user);
+        String response = rest.executeRequest(RESTClient.Method.POST, RPS_URL, getRPSRequestPayload(user, resources, privileges));
+        Map<String, Object> responseMap = mapper.readValue(response, typeMSO);
+        boolean allowed = (Boolean)((Map)((List) responseMap.get("access")).get(0)).get("allowed");
+        LOG.info(String.format("Access for user %s is allowed = %s", user, allowed));
+        return allowed;
+    }
+
+    private void waitForPolicyRefresh() {
+        try {
+            Thread.sleep(POLICY_REFRESH_INTERVAL);
+        }
+        catch (InterruptedException e) {
+            LOG.error(e);
+        }
+    }
+
+    private String getRangerPolicyUrl(String policyName) {
+        return RANGER_POLICY_URL + "?servicename=hawq&policyname=" + policyName;
+    }
+
+    private String getRPSRequestPayload(String user, Map<Policy.ResourceType, String> resources, String[] privileges) throws IOException {
+        Map<String, Object> request = new HashMap<>();
+        request.put("requestId", 9);
+        request.put("user", user);
+        request.put("clientIp", "123.0.0.21");
+        request.put("context", "CREATE SOME DATABASE OBJECT;");
+
+        Map<String, Object> access = new HashMap<>();
+        access.put("resource", resources);
+        access.put("privileges", privileges);
+
+        Set<Map<String, Object>> accesses = new HashSet<>();
+        accesses.add(access);
+        request.put("access", accesses);
+        return new ObjectMapper().writeValueAsString(request);
+    }
+}
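
Putting the helpers together, a rough sketch of what a concrete test built on this base class does at runtime (the test method itself is hypothetical; the constants and helper signatures are the ones defined above):

    // inside a subclass of ServiceTestBase, e.g. in a @Test method declared to throw IOException
    Policy policy = policyBuilder
            .resource(Policy.ResourceType.database, TEST_DB)
            .resource(Policy.ResourceType.schema, TEST_SCHEMA)
            .resource(Policy.ResourceType.table, STAR)
            .userAccess(TEST_USER, "select")
            .build();
    createPolicy(policy);      // POST to the Ranger admin, then wait ~6s for the plugin to refresh

    Map<Policy.ResourceType, String> resource = new HashMap<>();
    resource.put(Policy.ResourceType.database, TEST_DB);
    resource.put(Policy.ResourceType.schema, TEST_SCHEMA);
    resource.put(Policy.ResourceType.table, TEST_TABLE);
    assertTrue(hasAccess(TEST_USER, resource, "select"));    // lookup against the RPS at localhost:8432/rps

    deletePolicy(policy);      // DELETE on the Ranger admin, wait again, then verify access is gone
    assertFalse(hasAccess(TEST_USER, resource, "select"));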

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8a5e65bf/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/common/SimpleResourceTestBase.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/common/SimpleResourceTestBase.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/common/SimpleResourceTestBase.java
new file mode 100644
index 0000000..8bd18e8
--- /dev/null
+++ b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/common/SimpleResourceTestBase.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.service.tests.common;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.*;
+
+public abstract class SimpleResourceTestBase extends ServiceTestBase {
+
+    protected Map<Policy.ResourceType, String> specificResource = new HashMap<>();
+    protected Map<Policy.ResourceType, String> parentUnknownResource = new HashMap<>();
+    protected Map<Policy.ResourceType, String> childUnknownResource = new HashMap<>();
+    protected Map<Policy.ResourceType, String> unknownResource = new HashMap<>();
+    protected String[] privileges = {};
+
+    @Before
+    public void beforeSimple() throws IOException {
+        specificResource = new HashMap<>();
+        parentUnknownResource = new HashMap<>();
+        childUnknownResource = new HashMap<>();
+        unknownResource = new HashMap<>();
+        privileges = new String[]{};
+    }
+
+    @Test
+    public void testSpecificResourceUserPolicy() throws IOException {
+        checkResourceUserPolicy(getResourceUserPolicy());
+    }
+
+    @Test
+    public void testStarResourceGpadminPolicy() throws IOException {
+        checkUserHasResourceAccess(GPADMIN_USER, specificResource, privileges);
+        // user NOT in the policy --> has NO access to the specific resource
+        assertFalse(hasAccess(UNKNOWN, specificResource, privileges));
+        // test that other existing user can't rely on gpadmin policy
+        assertFalse(hasAccess(TEST_USER, specificResource, privileges));
+        // user IN the policy --> has access to the unknown resource
+        assertTrue(hasAccess(GPADMIN_USER, unknownResource, privileges));
+    }
+
+    @Test
+    public void testSpecificResourcePublicGroupPolicy() throws IOException {
+        Policy policy = getResourceGroupPolicy();
+        createPolicy(policy);
+        checkUserHasResourceAccess(TEST_USER, specificResource, privileges);
+        // user NOT named in the policy --> still has access to the specific resource via the public group
+        assertTrue(hasAccess(UNKNOWN, specificResource, privileges));
+        // user IN the policy --> has NO access to the unknown resource
+        assertFalse(hasAccess(TEST_USER, unknownResource, privileges));
+        // test that user doesn't have access if policy is deleted
+        deletePolicy(policy);
+        assertFalse(hasAccess(TEST_USER, specificResource, privileges));
+    }
+
+    protected void checkResourceUserPolicy(Policy policy) throws IOException {
+        createPolicy(policy);
+        boolean policyDeleted = false;
+        try {
+            checkUserHasResourceAccess(TEST_USER, specificResource, privileges);
+            // user NOT in the policy --> has NO access to the specific resource
+            LOG.debug(String.format("Asserting user %s NO  access %s privileges %s", UNKNOWN, specificResource, Arrays.toString(privileges)));
+            assertFalse(hasAccess(UNKNOWN, specificResource, privileges));
+
+            // if resource has parents, assert edge cases
+            if (!parentUnknownResource.isEmpty()) {
+                // user IN the policy --> has access to the resource only for parentStar policies
+                assertEquals(policy.isParentStar, hasAccess(TEST_USER, parentUnknownResource, privileges));
+            }
+            if (!childUnknownResource.isEmpty()) {
+                // user IN the policy --> has access to the resource only for childStar policies
+                assertEquals(policy.isChildStar, hasAccess(TEST_USER, childUnknownResource, privileges));
+            }
+
+            // user IN the policy --> has NO access to the unknown resource
+            assertFalse(hasAccess(TEST_USER, unknownResource, privileges));
+            // test that user doesn't have access if policy is deleted
+            deletePolicy(policy);
+            policyDeleted = true;
+            assertFalse(hasAccess(TEST_USER, specificResource, privileges));
+        } finally {
+            // if a test fails an assertion, still delete the policy so it does not impact other tests
+            if (!policyDeleted) {
+                deletePolicy(policy);
+            }
+        }
+    }
+
+    abstract protected Policy getResourceUserPolicy();
+    abstract protected Policy getResourceGroupPolicy();
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8a5e65bf/ranger-plugin/integration/service/src/test/resources/test-database.json
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/resources/test-database.json b/ranger-plugin/integration/service/src/test/resources/test-database.json
deleted file mode 100644
index ffa3bfe..0000000
--- a/ranger-plugin/integration/service/src/test/resources/test-database.json
+++ /dev/null
@@ -1,46 +0,0 @@
-{
-  "isEnabled": true,
-  "service": "hawq",
-  "name": "DatabaseTest",
-  "policyType": 0,
-  "description": "Test policy for database resource",
-  "isAuditEnabled": true,
-  "resources": {
-    "schema": {
-      "values": ["*"],
-      "isExcludes": false,
-      "isRecursive": false
-    },
-    "database": {
-      "values": ["sirotan"],
-      "isExcludes": false,
-      "isRecursive": false
-    },
-    "function": {
-      "values": ["*"],
-      "isExcludes": false,
-      "isRecursive": false
-    }
-  },
-  "policyItems": [{
-    "accesses": [{
-      "type": "create",
-      "isAllowed": true
-    }, {
-      "type": "connect",
-      "isAllowed": true
-    }, {
-      "type": "temp",
-      "isAllowed": true
-    }],
-    "users": ["maria_dev"],
-    "groups": [],
-    "conditions": [],
-    "delegateAdmin": true
-  }],
-  "denyPolicyItems": [],
-  "allowExceptions": [],
-  "denyExceptions": [],
-  "dataMaskPolicyItems": [],
-  "rowFilterPolicyItems": []
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8a5e65bf/ranger-plugin/integration/service/src/test/resources/test-function-2.json
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/resources/test-function-2.json b/ranger-plugin/integration/service/src/test/resources/test-function-2.json
deleted file mode 100644
index 5ae7f0b..0000000
--- a/ranger-plugin/integration/service/src/test/resources/test-function-2.json
+++ /dev/null
@@ -1,40 +0,0 @@
-{
-  "isEnabled": true,
-  "service": "hawq",
-  "name": "FunctionTest",
-  "policyType": 0,
-  "description": "Test policy for function resource",
-  "isAuditEnabled": true,
-  "resources": {
-    "schema": {
-      "values": ["*"],
-      "isExcludes": false,
-      "isRecursive": false
-    },
-    "database": {
-      "values": ["*"],
-      "isExcludes": false,
-      "isRecursive": false
-    },
-    "function": {
-      "values": ["atan"],
-      "isExcludes": false,
-      "isRecursive": false
-    }
-  },
-  "policyItems": [{
-    "accesses": [{
-      "type": "execute",
-      "isAllowed": true
-    }],
-    "users": ["maria_dev"],
-    "groups": [],
-    "conditions": [],
-    "delegateAdmin": true
-  }],
-  "denyPolicyItems": [],
-  "allowExceptions": [],
-  "denyExceptions": [],
-  "dataMaskPolicyItems": [],
-  "rowFilterPolicyItems": []
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8a5e65bf/ranger-plugin/integration/service/src/test/resources/test-function.json
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/resources/test-function.json b/ranger-plugin/integration/service/src/test/resources/test-function.json
deleted file mode 100644
index 74d5d83..0000000
--- a/ranger-plugin/integration/service/src/test/resources/test-function.json
+++ /dev/null
@@ -1,40 +0,0 @@
-{
-  "isEnabled": true,
-  "service": "hawq",
-  "name": "FunctionTest",
-  "policyType": 0,
-  "description": "Test policy for function resource",
-  "isAuditEnabled": true,
-  "resources": {
-    "schema": {
-      "values": ["siroschema"],
-      "isExcludes": false,
-      "isRecursive": false
-    },
-    "database": {
-      "values": ["sirotan"],
-      "isExcludes": false,
-      "isRecursive": false
-    },
-    "function": {
-      "values": ["atan"],
-      "isExcludes": false,
-      "isRecursive": false
-    }
-  },
-  "policyItems": [{
-    "accesses": [{
-      "type": "execute",
-      "isAllowed": true
-    }],
-    "users": ["maria_dev"],
-    "groups": [],
-    "conditions": [],
-    "delegateAdmin": true
-  }],
-  "denyPolicyItems": [],
-  "allowExceptions": [],
-  "denyExceptions": [],
-  "dataMaskPolicyItems": [],
-  "rowFilterPolicyItems": []
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8a5e65bf/ranger-plugin/integration/service/src/test/resources/test-language-2.json
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/resources/test-language-2.json b/ranger-plugin/integration/service/src/test/resources/test-language-2.json
deleted file mode 100644
index 93a41fe..0000000
--- a/ranger-plugin/integration/service/src/test/resources/test-language-2.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{
-  "isEnabled": true,
-  "service": "hawq",
-  "name": "LanguageTest",
-  "policyType": 0,
-  "description": "Test policy for language resource",
-  "isAuditEnabled": true,
-  "resources": {
-    "language": {
-      "values": ["sql"],
-      "isExcludes": false,
-      "isRecursive": false
-    },
-    "database": {
-      "values": ["*"],
-      "isExcludes": false,
-      "isRecursive": false
-    }
-  },
-  "policyItems": [{
-    "accesses": [{
-      "type": "usage",
-      "isAllowed": true
-    }],
-    "users": ["maria_dev"],
-    "groups": [],
-    "conditions": [],
-    "delegateAdmin": true
-  }],
-  "denyPolicyItems": [],
-  "allowExceptions": [],
-  "denyExceptions": [],
-  "dataMaskPolicyItems": [],
-  "rowFilterPolicyItems": []
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8a5e65bf/ranger-plugin/integration/service/src/test/resources/test-language.json
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/resources/test-language.json b/ranger-plugin/integration/service/src/test/resources/test-language.json
deleted file mode 100644
index cba2f43..0000000
--- a/ranger-plugin/integration/service/src/test/resources/test-language.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{
-  "isEnabled": true,
-  "service": "hawq",
-  "name": "LanguageTest",
-  "policyType": 0,
-  "description": "Test policy for language resource",
-  "isAuditEnabled": true,
-  "resources": {
-    "language": {
-      "values": ["sql"],
-      "isExcludes": false,
-      "isRecursive": false
-    },
-    "database": {
-      "values": ["sirotan"],
-      "isExcludes": false,
-      "isRecursive": false
-    }
-  },
-  "policyItems": [{
-    "accesses": [{
-      "type": "usage",
-      "isAllowed": true
-    }],
-    "users": ["maria_dev"],
-    "groups": [],
-    "conditions": [],
-    "delegateAdmin": true
-  }],
-  "denyPolicyItems": [],
-  "allowExceptions": [],
-  "denyExceptions": [],
-  "dataMaskPolicyItems": [],
-  "rowFilterPolicyItems": []
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8a5e65bf/ranger-plugin/integration/service/src/test/resources/test-protocol.json
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/resources/test-protocol.json b/ranger-plugin/integration/service/src/test/resources/test-protocol.json
deleted file mode 100644
index d59caed..0000000
--- a/ranger-plugin/integration/service/src/test/resources/test-protocol.json
+++ /dev/null
@@ -1,33 +0,0 @@
-{
-  "isEnabled": true,
-  "service": "hawq",
-  "name": "ProtocolTest",
-  "policyType": 0,
-  "description": "Test policy for protocol resource",
-  "isAuditEnabled": true,
-  "resources": {
-    "protocol": {
-      "values": ["pxf"],
-      "isExcludes": false,
-      "isRecursive": false
-    }
-  },
-  "policyItems": [{
-    "accesses": [{
-      "type": "select",
-      "isAllowed": true
-    }, {
-      "type": "insert",
-      "isAllowed": true
-    }],
-    "users": ["maria_dev"],
-    "groups": [],
-    "conditions": [],
-    "delegateAdmin": true
-  }],
-  "denyPolicyItems": [],
-  "allowExceptions": [],
-  "denyExceptions": [],
-  "dataMaskPolicyItems": [],
-  "rowFilterPolicyItems": []
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8a5e65bf/ranger-plugin/integration/service/src/test/resources/test-tablespace.json
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/resources/test-tablespace.json b/ranger-plugin/integration/service/src/test/resources/test-tablespace.json
deleted file mode 100644
index a45ecea..0000000
--- a/ranger-plugin/integration/service/src/test/resources/test-tablespace.json
+++ /dev/null
@@ -1,30 +0,0 @@
-{
-  "isEnabled": true,
-  "service": "hawq",
-  "name": "TablespaceTest",
-  "policyType": 0,
-  "description": "Test policy for tablespace resource",
-  "isAuditEnabled": true,
-  "resources": {
-    "tablespace": {
-      "values": ["pg_global"],
-      "isExcludes": false,
-      "isRecursive": false
-    }
-  },
-  "policyItems": [{
-    "accesses": [{
-      "type": "create",
-      "isAllowed": true
-    }],
-    "users": ["maria_dev"],
-    "groups": [],
-    "conditions": [],
-    "delegateAdmin": true
-  }],
-  "denyPolicyItems": [],
-  "allowExceptions": [],
-  "denyExceptions": [],
-  "dataMaskPolicyItems": [],
-  "rowFilterPolicyItems": []
-}
\ No newline at end of file



[32/50] [abbrv] incubator-hawq git commit: HAWQ-1203. Ranger Plugin Service Implementation. (with contributions by Lav Jain and Leslie Chang) (close #1092)

Posted by es...@apache.org.
HAWQ-1203. Ranger Plugin Service Implementation. (with contributions by Lav Jain and Leslie Chang)
(close #1092)


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/7f36b35b
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/7f36b35b
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/7f36b35b

Branch: refs/heads/2.1.0.0-incubating
Commit: 7f36b35bd059412b45444f613fb5b5e108a3a83e
Parents: e46f06c
Author: Alexander Denissov <ad...@pivotal.io>
Authored: Mon Jan 16 11:46:36 2017 -0800
Committer: Alexander Denissov <ad...@pivotal.io>
Committed: Wed Jan 18 10:57:09 2017 -0800

----------------------------------------------------------------------
 .gitignore                                      |   7 +
 ranger-plugin/admin-plugin/pom.xml              |  90 +++++
 .../apache/hawq/ranger/model/HawqProtocols.java |  49 +++
 .../apache/hawq/ranger/model/HawqResource.java  |  29 ++
 .../apache/hawq/ranger/service/HawqClient.java  | 314 ++++++++++++++++++
 .../hawq/ranger/service/HawqResourceMgr.java    |  71 ++++
 .../hawq/ranger/service/RangerServiceHawq.java  |  66 ++++
 .../hawq/ranger/service/HawqClientTest.java     | 217 +++++++++++++
 .../ranger/service/RangerServiceHawqTest.java   |  90 +++++
 .../src/test/resources/log4j.properties         |  34 ++
 ranger-plugin/conf/ranger-servicedef-hawq.json  | 287 ++++++++++++++++
 ranger-plugin/conf/tomcat-server.xml            |  60 ++++
 ranger-plugin/integration/admin/pom.xml         |  70 ++++
 .../integration/admin/ListDatabasesTest.java    |  55 ++++
 .../integration/admin/ListFunctionsTest.java    | 250 ++++++++++++++
 .../integration/admin/ListLanguagesTest.java    | 118 +++++++
 .../integration/admin/ListProtocolsTest.java    |  55 ++++
 .../integration/admin/ListSchemasTest.java      | 126 +++++++
 .../integration/admin/ListSequencesTest.java    | 250 ++++++++++++++
 .../integration/admin/ListTablesTest.java       | 250 ++++++++++++++
 .../integration/admin/ListTablespacesTest.java  |  55 ++++
 .../integration/admin/LookupTestBase.java       |  65 ++++
 .../src/test/resources/admin-tests-ddl.sql      |  61 ++++
 .../admin/src/test/resources/log4j.properties   |  34 ++
 ranger-plugin/integration/pom.xml               |  67 ++++
 ranger-plugin/integration/service/pom.xml       |  68 ++++
 .../integration/service/tests/DatabaseTest.java |  67 ++++
 .../integration/service/tests/FunctionTest.java |  91 ++++++
 .../integration/service/tests/LanguageTest.java |  83 +++++
 .../integration/service/tests/ProtocolTest.java |  67 ++++
 .../integration/service/tests/RPSRequest.java   |  60 ++++
 .../integration/service/tests/RPSResponse.java  |  42 +++
 .../service/tests/ServiceBaseTest.java          | 116 +++++++
 .../service/tests/TablespaceTest.java           |  67 ++++
 .../ranger/integration/service/tests/Utils.java |  76 +++++
 .../service/src/test/resources/log4j.properties |  35 ++
 .../src/test/resources/test-database.json       |  46 +++
 .../src/test/resources/test-function-2.json     |  40 +++
 .../src/test/resources/test-function.json       |  40 +++
 .../src/test/resources/test-language-2.json     |  35 ++
 .../src/test/resources/test-language.json       |  35 ++
 .../src/test/resources/test-protocol.json       |  33 ++
 .../src/test/resources/test-tablespace.json     |  30 ++
 ranger-plugin/pom.xml                           | 248 ++++++++++++++
 ranger-plugin/scripts/register_hawq.sh          | 217 +++++++++++++
 ranger-plugin/scripts/rps.sh                    |  60 ++++
 ranger-plugin/scripts/rps_env.sh                |  30 ++
 ranger-plugin/service/pom.xml                   |  88 +++++
 .../ranger/authorization/HawqAuthorizer.java    |  36 ++
 .../authorization/RangerHawqAuthorizer.java     | 263 +++++++++++++++
 .../authorization/RangerHawqPluginResource.java |  86 +++++
 .../authorization/ServiceExceptionMapper.java   |  97 ++++++
 .../apache/hawq/ranger/authorization/Utils.java |  80 +++++
 .../model/AuthorizationRequest.java             | 115 +++++++
 .../model/AuthorizationResponse.java            |  63 ++++
 .../authorization/model/HawqPrivilege.java      |  61 ++++
 .../authorization/model/HawqResource.java       |  46 +++
 .../authorization/model/ResourceAccess.java     |  85 +++++
 .../service/src/main/resources/log4j.properties |  42 +++
 .../src/main/resources/ranger-hawq-security.xml |  92 ++++++
 .../service/src/main/resources/rps.properties   |  17 +
 .../service/src/main/webapp/WEB-INF/web.xml     |  72 ++++
 .../RangerHawqAuthorizerAppIdTest.java          |  39 +++
 .../authorization/RangerHawqAuthorizerTest.java | 325 +++++++++++++++++++
 .../RangerHawqPluginResourceTest.java           |  79 +++++
 .../ServiceExceptionMapperTest.java             |  61 ++++
 .../hawq/ranger/authorization/UtilsTest.java    |  48 +++
 .../authorization/model/HawqPrivilegeTest.java  |  71 ++++
 .../authorization/model/HawqResourceTest.java   |  48 +++
 .../service/src/test/resources/log4j.properties |  42 +++
 .../service/src/test/resources/rps.properties   |  17 +
 71 files changed, 6329 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/.gitignore
----------------------------------------------------------------------
diff --git a/.gitignore b/.gitignore
index 2076819..24039c6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -41,6 +41,12 @@ objfiles.txt
 .pydevproject
 .cproject
 .settings
+.classpath
+
+# IDEA Project
+*.iml
+.idea
+
 # Ctags
 **/tags
 
@@ -54,6 +60,7 @@ env.sh
 ext/
 plr.tgz
 autom4te.cache/
+**/target
 
 # coverage
 *.gcda

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/admin-plugin/pom.xml
----------------------------------------------------------------------
diff --git a/ranger-plugin/admin-plugin/pom.xml b/ranger-plugin/admin-plugin/pom.xml
new file mode 100644
index 0000000..67c0824
--- /dev/null
+++ b/ranger-plugin/admin-plugin/pom.xml
@@ -0,0 +1,90 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>org.apache.hawq</groupId>
+    <artifactId>ranger-plugin-admin</artifactId>
+    <packaging>jar</packaging>
+    <name>HAWQ Ranger Admin Plugin</name>
+    <description>HAWQ Ranger Admin Plugin</description>
+    <parent>
+        <groupId>org.apache.hawq</groupId>
+        <artifactId>ranger-plugin</artifactId>
+        <version>2.1.0.0</version>
+        <relativePath>..</relativePath>
+    </parent>
+
+    <build>
+        <plugins>
+            <plugin>
+                <artifactId>maven-dependency-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>copy-dependencies</goal>
+                        </goals>
+                        <configuration>
+                            <outputDirectory>${project.build.directory}/lib</outputDirectory>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+
+    <dependencies>
+
+        <!-- Compilation Dependencies -->
+        <dependency>
+            <groupId>org.apache.ranger</groupId>
+            <artifactId>ranger-plugins-common</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>log4j</groupId>
+            <artifactId>log4j</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>postgresql</groupId>
+            <artifactId>postgresql</artifactId>
+        </dependency>
+
+        <!-- Test Dependencies -->
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.mockito</groupId>
+            <artifactId>mockito-core</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.powermock</groupId>
+            <artifactId>powermock-module-junit4</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.powermock</groupId>
+            <artifactId>powermock-api-mockito</artifactId>
+        </dependency>
+
+    </dependencies>
+
+</project>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/admin-plugin/src/main/java/org/apache/hawq/ranger/model/HawqProtocols.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/admin-plugin/src/main/java/org/apache/hawq/ranger/model/HawqProtocols.java b/ranger-plugin/admin-plugin/src/main/java/org/apache/hawq/ranger/model/HawqProtocols.java
new file mode 100644
index 0000000..c6e51b0
--- /dev/null
+++ b/ranger-plugin/admin-plugin/src/main/java/org/apache/hawq/ranger/model/HawqProtocols.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hawq.ranger.model;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public enum HawqProtocols {
+
+    PROTOCOL_FILE("file"),
+    PROTOCOL_FTP("ftp"),
+    PROTOCOL_HTTP("http"),
+    PROTOCOL_GPFDIST("gpfdist"),
+    PROTOCOL_GPFDISTS("gpfdists"),
+    PROTOCOL_PXF("pxf");
+
+    private final String name;
+
+    private HawqProtocols(String name) {
+        this.name = name;
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    public static List<String> getAllProtocols() {
+        List<String> protocols = new ArrayList<>();
+        for(HawqProtocols protocol : HawqProtocols.values()) {
+            protocols.add(protocol.getName());
+        }
+        return protocols;
+    }
+}
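
The enum above feeds protocol lookups in the admin plugin: getAllProtocols() returns the names of the built-in external-table protocols so HawqClient can merge them with protocols found in pg_catalog.pg_extprotocol. A minimal usage sketch (the wrapper class and main method are illustrative only):

    import java.util.List;
    import org.apache.hawq.ranger.model.HawqProtocols;

    public class ProtocolListingExample {
        public static void main(String[] args) {
            // Built-in protocols declared above: file, ftp, http, gpfdist, gpfdists, pxf
            List<String> builtIns = HawqProtocols.getAllProtocols();
            System.out.println(builtIns);
        }
    }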

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/admin-plugin/src/main/java/org/apache/hawq/ranger/model/HawqResource.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/admin-plugin/src/main/java/org/apache/hawq/ranger/model/HawqResource.java b/ranger-plugin/admin-plugin/src/main/java/org/apache/hawq/ranger/model/HawqResource.java
new file mode 100644
index 0000000..aae0b04
--- /dev/null
+++ b/ranger-plugin/admin-plugin/src/main/java/org/apache/hawq/ranger/model/HawqResource.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hawq.ranger.model;
+
+public enum HawqResource {
+    DATABASE,
+    SCHEMA,
+    TABLE,
+    SEQUENCE,
+    FUNCTION,
+    LANGUAGE,
+    TABLESPACE,
+    PROTOCOL
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/admin-plugin/src/main/java/org/apache/hawq/ranger/service/HawqClient.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/admin-plugin/src/main/java/org/apache/hawq/ranger/service/HawqClient.java b/ranger-plugin/admin-plugin/src/main/java/org/apache/hawq/ranger/service/HawqClient.java
new file mode 100644
index 0000000..d2606be
--- /dev/null
+++ b/ranger-plugin/admin-plugin/src/main/java/org/apache/hawq/ranger/service/HawqClient.java
@@ -0,0 +1,314 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hawq.ranger.service;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hawq.ranger.model.HawqProtocols;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.sql.ResultSet;
+import java.util.*;
+
+import static org.apache.ranger.plugin.client.BaseClient.generateResponseDataMap;
+
+public class HawqClient {
+
+    private static final Log LOG = LogFactory.getLog(HawqClient.class);
+
+    public static final String CONNECTION_SUCCESSFUL_MESSAGE = "ConnectionTest Successful";
+    public static final String CONNECTION_FAILURE_MESSAGE = "Unable to retrieve any databases using given parameters.";
+
+    public static final String DATABASE_LIST_QUERY = "SELECT datname from pg_database WHERE " +
+            "datname NOT IN ('template0', 'template1', 'hcatalog') and datname LIKE ?";
+    public static final String TABLESPACE_LIST_QUERY = "SELECT spcname from pg_tablespace WHERE spcname LIKE ?";
+    public static final String PROTOCOL_LIST_QUERY = "SELECT ptcname from pg_catalog.pg_extprotocol WHERE ptcname LIKE ?";
+    public static final String SCHEMA_LIST_QUERY = "SELECT schema_name from information_schema.schemata WHERE " +
+            "schema_name NOT IN ('pg_catalog', 'information_schema', 'hawq_toolkit', 'pg_bitmapindex', 'pg_aoseg') AND schema_name NOT LIKE 'pg_toast%' AND schema_name LIKE ?";
+    public static final String LANGUAGE_LIST_QUERY = "SELECT lanname from pg_language WHERE lanname LIKE ?";
+    public static final String SEQUENCE_LIST_QUERY = "SELECT schemaname, relname from pg_statio_all_sequences WHERE relname LIKE ?";
+    public static final String FUNCTION_LIST_QUERY = "SELECT n.nspname, p.proname FROM pg_catalog.pg_proc p " +
+            "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace WHERE n.nspname NOT IN " +
+            "('pg_catalog', 'information_schema', 'hawq_toolkit', 'pg_bitmapindex', 'pg_aoseg') AND n.nspname NOT LIKE 'pg_toast%' AND p.proname LIKE ?";
+    public static final String TABLE_LIST_QUERY = "SELECT c.relname, n.nspname FROM pg_catalog.pg_class c " +
+            "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE c.relkind IN ('r','','v') AND n.nspname NOT IN " +
+            "('pg_catalog', 'information_schema', 'hawq_toolkit', 'pg_bitmapindex', 'pg_aoseg') AND n.nspname NOT LIKE 'pg_toast%' AND c.relname LIKE ?";
+
+    public static final String DATNAME = "datname";
+    public static final String SPCNAME = "spcname";
+    public static final String PTCNAME = "ptcname";
+    public static final String SCHEMA_NAME = "schema_name";
+    public static final String LANGUAGE_NAME = "lanname";
+    public static final String RELNAME = "relname";
+    public static final String SCHEMANAME = "schemaname";
+    public static final String PRONAME = "proname";
+    public static final String NSPNAME = "nspname";
+    public static final String WILDCARD = "*";
+
+    public static final List<String> INTERNAL_PROTOCOLS = HawqProtocols.getAllProtocols();
+    private static final String DEFAULT_DATABASE = "postgres";
+    private static final String JDBC_DRIVER_CLASS = "org.postgresql.Driver";
+
+    private Map<String, String> connectionProperties;
+
+    // we need to load class for the Postgres Driver directly to allow it to register with DriverManager
+    // since DriverManager's classloader will not be able to find it by itself due to plugin's special classloaders
+    static {
+        try {
+            Class.forName(JDBC_DRIVER_CLASS);
+        } catch (Throwable e) {
+            LOG.error("<== HawqClient.initializer : Unable to load JDBC driver " + JDBC_DRIVER_CLASS + " : " + e.getMessage());
+            throw new ExceptionInInitializerError(e);
+        }
+    }
+
+    public HawqClient(Map<String, String> connectionProperties) throws SQLException {
+        this.connectionProperties = connectionProperties;
+    }
+
+    /**
+     * Uses the connectionProperties and attempts to connect to Hawq.
+     * Returns a message depending on success or failure.
+     *
+     * @param connectionProperties Map which contains hostname, port, username, and password
+     * @return Map which contains connectivityStatus, message, description, objectId, and fieldName
+     */
+
+    public HashMap<String, Object> checkConnection(Map<String, String> connectionProperties) throws Exception {
+
+        boolean isConnected = false;
+        HashMap<String, Object> result = new HashMap<>();
+        Connection conn = null;
+
+        String description = CONNECTION_FAILURE_MESSAGE;
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("<== HawqClient.checkConnection Starting connection to hawq");
+        }
+
+        try {
+            conn = getConnection(connectionProperties);
+            if(conn.getCatalog() != null) {
+                isConnected = true;
+                description = CONNECTION_SUCCESSFUL_MESSAGE;
+            }
+        } catch (SQLException e) {
+            LOG.error("<== HawqClient.checkConnection Error: Failed to connect" + e);
+            description = e.getMessage();
+        } finally {
+            closeConnection(conn);
+        }
+
+        String message = isConnected ? CONNECTION_SUCCESSFUL_MESSAGE : CONNECTION_FAILURE_MESSAGE;
+
+        generateResponseDataMap(isConnected, message, description, null, null, result);
+
+        return result;
+    }
+
+    public List<String> getDatabaseList(String userInput) throws SQLException {
+        return queryHawq(userInput, DATNAME, DATABASE_LIST_QUERY, null);
+    }
+
+    public List<String> getTablespaceList(String userInput) throws SQLException {
+        return queryHawq(userInput, SPCNAME, TABLESPACE_LIST_QUERY, null);
+    }
+
+    public List<String> getProtocolList(String userInput) throws SQLException {
+        Set<String> allProtocols = new HashSet<>();
+        for (String protocol : INTERNAL_PROTOCOLS) {
+            if(protocol.startsWith(userInput) || userInput.equals(WILDCARD)) {
+                allProtocols.add(protocol);
+            }
+        }
+        allProtocols.addAll(queryHawq(userInput, PTCNAME, PROTOCOL_LIST_QUERY, null));
+        return new ArrayList<>(allProtocols);
+    }
+
+    public List<String> getSchemaList(String userInput, Map<String, List<String>> resources) throws SQLException {
+        return queryHawqPerDb(userInput, resources.get("database"), SCHEMA_NAME, SCHEMA_LIST_QUERY);
+    }
+
+    public List<String> getLanguageList(String userInput,  Map<String, List<String>> resources) throws SQLException {
+        return queryHawqPerDb(userInput, resources.get("database"), LANGUAGE_NAME, LANGUAGE_LIST_QUERY);
+    }
+
+    public List<String> getTableList(String userInput, Map<String, List<String>> resources) throws SQLException {
+        return queryHawqPerDbAndSchema(userInput, resources, NSPNAME, RELNAME, TABLE_LIST_QUERY);
+    }
+
+    public List<String> getSequenceList(String userInput, Map<String, List<String>> resources) throws SQLException {
+        return queryHawqPerDbAndSchema(userInput, resources, SCHEMANAME, RELNAME, SEQUENCE_LIST_QUERY);
+    }
+
+    public List<String> getFunctionList(String userInput, Map<String, List<String>> resources) throws SQLException {
+        return queryHawqPerDbAndSchema(userInput, resources, NSPNAME, PRONAME, FUNCTION_LIST_QUERY);
+    }
+
+    private List<String> queryHawqPerDb(String userInput, List<String> databases, String columnName, String query) throws SQLException {
+        Set<String> uniqueResults = new HashSet<>();
+
+        //do for all databases
+        if (databases.contains(WILDCARD)) {
+            databases = getDatabaseList(WILDCARD);
+        }
+
+        for (String db : databases) {
+            uniqueResults.addAll(queryHawq(userInput, columnName, query, db));
+        }
+        return new ArrayList<>(uniqueResults);
+    }
+
+    private List<String> queryHawqPerDbAndSchema(String userInput, Map<String, List<String>> resources, String schemaColumnName, String resultColumnName, String query) throws SQLException {
+        Set<String> uniqueResults = new HashSet<>();
+        List<String> databases = resources.get("database");
+        List<String> schemas = resources.get("schema");
+
+        Connection conn = null;
+        PreparedStatement preparedStatement = null;
+        ResultSet resultSet = null;
+
+        if (databases.contains(WILDCARD)) {
+            databases = getDatabaseList(WILDCARD);
+        }
+
+        for (String db: databases) {
+            if (LOG.isDebugEnabled()) {
+                LOG.debug("<== HawqClient.queryHawqPerDbAndSchema: Connecting to db: " + db);
+            }
+
+            try {
+                conn = getConnection(connectionProperties, db);
+                preparedStatement = handleWildcardPreparedStatement(userInput, query, conn);
+
+                if (LOG.isDebugEnabled()) {
+                    LOG.debug("<== HawqClient.queryHawqPerDbAndSchema Starting query: " + query);
+                }
+
+                resultSet = preparedStatement.executeQuery();
+
+                while(resultSet.next()) {
+                    if(schemas.contains(resultSet.getString(schemaColumnName)) || schemas.contains(WILDCARD)) {
+                        uniqueResults.add(resultSet.getString(resultColumnName));
+                    }
+                }
+
+                if (LOG.isDebugEnabled()) {
+                    LOG.debug("<== HawqClient.queryHawqPerDbAndSchema Query result: " + uniqueResults.toString());
+                }
+
+            } catch (SQLException e) {
+                LOG.error("<== HawqClient.queryHawqPerDbAndSchema Error: Failed to get results for query: " + query + ", Error: " + e);
+            } finally {
+                closeResultSet(resultSet);
+                closeStatement(preparedStatement);
+                closeConnection(conn);
+            }
+        }
+        return new ArrayList<>(uniqueResults);
+    }
+
+    private List<String> queryHawq(String userInput, String columnName, String query, String database) {
+        List<String> result = new ArrayList<>();
+        Connection conn = null;
+        PreparedStatement preparedStatement = null;
+        ResultSet resultSet = null;
+
+        try {
+            conn = getConnection(connectionProperties, database);
+            preparedStatement = handleWildcardPreparedStatement(userInput, query, conn);
+
+            if (LOG.isDebugEnabled()) {
+                LOG.debug("<== HawqClient.queryHawq Starting query: " + query);
+            }
+
+            resultSet = preparedStatement.executeQuery();
+
+            while(resultSet.next()) {
+                result.add(resultSet.getString(columnName));
+            }
+
+            if (LOG.isDebugEnabled()) {
+                LOG.debug("<== HawqClient.queryHawq Query result: " + result.toString());
+            }
+
+        } catch (SQLException e) {
+            LOG.error("<== HawqClient.queryHawq Error: Failed to get result from query: " + query + ", Error: " + e);
+        } finally {
+            closeResultSet(resultSet);
+            closeStatement(preparedStatement);
+            closeConnection(conn);
+        }
+
+        return result;
+    }
+
+    private Connection getConnection(Map<String, String> connectionProperties) throws SQLException {
+        return getConnection(connectionProperties, null);
+    }
+
+    private Connection getConnection(Map<String, String> connectionProperties, String database) throws SQLException {
+
+        String db = database != null ? database : DEFAULT_DATABASE;
+        String url = String.format("jdbc:postgresql://%s:%s/%s", connectionProperties.get("hostname"), connectionProperties.get("port"), db);
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("<== HawqClient.checkConnection Connecting to: (" + url + ") with user: " + connectionProperties.get("username") );
+        }
+
+        Properties props = new Properties();
+        props.setProperty("user", connectionProperties.get("username"));
+        props.setProperty("password", connectionProperties.get("password"));
+        return DriverManager.getConnection(url, props);
+    }
+
+    private PreparedStatement handleWildcardPreparedStatement(String userInput, String query, Connection conn) throws SQLException {
+        PreparedStatement preparedStatement = conn.prepareStatement(query);
+        preparedStatement.setString(1, userInput.equals(WILDCARD) ? "%" : (userInput + "%"));
+        return preparedStatement;
+    }
+
+    private void closeResultSet(ResultSet rs) {
+        try {
+            if (rs != null) rs.close();
+        } catch (Exception e) {
+            // ignore
+        }
+    }
+
+    private void closeStatement(PreparedStatement st) {
+        try {
+            if (st != null) st.close();
+        } catch (Exception e) {
+            // ignore
+        }
+    }
+
+    private void closeConnection(Connection conn) {
+        try {
+            if (conn != null) conn.close();
+        } catch (Exception e) {
+            // ignore
+        }
+    }
+
+}
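
Two details of HawqClient are worth calling out: lookups for schema-scoped objects run once per database (expanding a "*" database selection via getDatabaseList), and the prefix typed into the Ranger lookup box is turned into a SQL LIKE pattern, where a lone "*" becomes "%" and anything else becomes "<input>%". A minimal sketch of that conversion, mirroring handleWildcardPreparedStatement above (the standalone helper class is illustrative only):

    // Mirrors HawqClient.handleWildcardPreparedStatement: the result is bound
    // as the single LIKE parameter of the lookup queries defined above.
    public final class LikePatternExample {
        static String toLikePattern(String userInput) {
            return "*".equals(userInput) ? "%" : userInput + "%";
        }

        public static void main(String[] args) {
            System.out.println(toLikePattern("*"));   // prints "%"
            System.out.println(toLikePattern("db"));  // prints "db%"
        }
    }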

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/admin-plugin/src/main/java/org/apache/hawq/ranger/service/HawqResourceMgr.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/admin-plugin/src/main/java/org/apache/hawq/ranger/service/HawqResourceMgr.java b/ranger-plugin/admin-plugin/src/main/java/org/apache/hawq/ranger/service/HawqResourceMgr.java
new file mode 100644
index 0000000..214ebdb
--- /dev/null
+++ b/ranger-plugin/admin-plugin/src/main/java/org/apache/hawq/ranger/service/HawqResourceMgr.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hawq.ranger.service;
+
+import org.apache.hawq.ranger.model.HawqResource;
+import org.apache.ranger.plugin.service.ResourceLookupContext;
+
+import java.sql.SQLException;
+import java.util.List;
+import java.util.Map;
+import java.util.Collections;
+
+public abstract class HawqResourceMgr {
+
+    public static List<String> getHawqResources(Map<String, String> configs,
+                                                ResourceLookupContext context) throws SQLException {
+        String userInput = context.getUserInput();
+        HawqResource hawqResource = HawqResource.valueOf(context.getResourceName().toUpperCase());
+        Map<String, List<String>> resources = context.getResources();
+
+        List<String> result;
+        HawqClient hawqClient = new HawqClient(configs);
+
+        switch (hawqResource) {
+            case DATABASE:
+                result = hawqClient.getDatabaseList(userInput);
+                break;
+            case TABLESPACE:
+                result = hawqClient.getTablespaceList(userInput);
+                break;
+            case PROTOCOL:
+                result = hawqClient.getProtocolList(userInput);
+                break;
+            case SCHEMA:
+                result = hawqClient.getSchemaList(userInput, resources);
+                break;
+            case LANGUAGE:
+                result = hawqClient.getLanguageList(userInput, resources);
+                break;
+            case TABLE:
+                result = hawqClient.getTableList(userInput, resources);
+                break;
+            case SEQUENCE:
+                result = hawqClient.getSequenceList(userInput, resources);
+                break;
+            case FUNCTION:
+                result = hawqClient.getFunctionList(userInput, resources);
+                break;
+            default:
+                throw new IllegalArgumentException("Resource requested does not exist.");
+        }
+
+        Collections.sort(result);
+        return result;
+    }
+}
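
HawqResourceMgr is the single dispatch point for resource lookups: it upper-cases the requested resource name into the HawqResource enum, routes to the matching HawqClient method, and returns the sorted result. A minimal sketch of a schema lookup, assuming ResourceLookupContext exposes setters matching the getters used above (the resource values are placeholders):

    import org.apache.ranger.plugin.service.ResourceLookupContext;
    import java.util.*;

    public class SchemaLookupExample {
        public static void main(String[] args) throws Exception {
            ResourceLookupContext context = new ResourceLookupContext();
            context.setResourceName("schema");   // dispatched to HawqResource.SCHEMA
            context.setUserInput("s");           // prefix typed in the Ranger UI
            context.setResources(Collections.singletonMap("database", Arrays.asList("postgres")));

            Map<String, String> configs = new HashMap<>();  // must contain hostname, port, username, password
            List<String> schemas = HawqResourceMgr.getHawqResources(configs, context);
            System.out.println(schemas);
        }
    }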

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/admin-plugin/src/main/java/org/apache/hawq/ranger/service/RangerServiceHawq.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/admin-plugin/src/main/java/org/apache/hawq/ranger/service/RangerServiceHawq.java b/ranger-plugin/admin-plugin/src/main/java/org/apache/hawq/ranger/service/RangerServiceHawq.java
new file mode 100644
index 0000000..0f12191
--- /dev/null
+++ b/ranger-plugin/admin-plugin/src/main/java/org/apache/hawq/ranger/service/RangerServiceHawq.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hawq.ranger.service;
+
+import org.apache.ranger.plugin.client.HadoopException;
+import org.apache.ranger.plugin.service.RangerBaseService;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.ranger.plugin.service.ResourceLookupContext;
+
+import java.util.*;
+
+public class RangerServiceHawq extends RangerBaseService {
+
+    private static final Log LOG = LogFactory.getLog(RangerServiceHawq.class);
+
+    @Override
+    public HashMap<String, Object> validateConfig() throws Exception {
+        boolean isDebugEnabled = LOG.isDebugEnabled();
+
+        if(isDebugEnabled) {
+            LOG.debug("==> RangerServiceHawq.validateConfig Service: (hawq)");
+        }
+
+        HashMap<String, Object> result = new HashMap<>();
+
+        if (configs != null) {
+            try  {
+                HawqClient hawqClient = new HawqClient(configs);
+                result = hawqClient.checkConnection(configs);
+            } catch (HadoopException e) {
+                LOG.error("<== RangerServiceHawq.validateConfig Error:" + e);
+                throw e;
+            }
+        }
+
+        if (isDebugEnabled) {
+            LOG.debug("<== RangerServiceHawq.validateConfig Response : (" + result + ")");
+        }
+        return result;
+    }
+
+    @Override
+    public List<String> lookupResource(ResourceLookupContext context) throws Exception {
+        List<String> resources = HawqResourceMgr.getHawqResources(getConfigs(), context);
+
+        return resources;
+    }
+
+}
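
RangerServiceHawq is the entry point Ranger Admin uses for a HAWQ service definition: validateConfig() opens a JDBC connection with the configured credentials and reports connectivity, while lookupResource() delegates to HawqResourceMgr. A minimal sketch of calling validateConfig() directly, mirroring the unit test further below (connection values are placeholders):

    import org.apache.hawq.ranger.service.RangerServiceHawq;
    import java.util.HashMap;
    import java.util.Map;

    public class ValidateConfigExample {
        public static void main(String[] args) throws Exception {
            RangerServiceHawq service = new RangerServiceHawq();
            service.setServiceName("hawq");
            service.setServiceType("hawq");

            Map<String, String> configs = new HashMap<>();
            configs.put("hostname", "localhost");   // placeholder connection settings
            configs.put("port", "5432");
            configs.put("username", "gpadmin");
            configs.put("password", "changeme");
            service.setConfigs(configs);

            // Result map carries the connectivityStatus, message and description keys.
            System.out.println(service.validateConfig());
        }
    }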

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/admin-plugin/src/test/java/org/apache/hawq/ranger/service/HawqClientTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/admin-plugin/src/test/java/org/apache/hawq/ranger/service/HawqClientTest.java b/ranger-plugin/admin-plugin/src/test/java/org/apache/hawq/ranger/service/HawqClientTest.java
new file mode 100644
index 0000000..7d624d8
--- /dev/null
+++ b/ranger-plugin/admin-plugin/src/test/java/org/apache/hawq/ranger/service/HawqClientTest.java
@@ -0,0 +1,217 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hawq.ranger.service;
+
+import org.apache.ranger.plugin.client.BaseClient;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.powermock.api.mockito.PowerMockito;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+import java.util.List;
+import java.util.Arrays;
+import java.util.Collections;
+
+import static org.apache.hawq.ranger.service.HawqClient.*;
+import static org.junit.Assert.*;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyMap;
+import static org.mockito.Matchers.anyString;
+import static org.powermock.api.mockito.PowerMockito.*;
+
+@RunWith(PowerMockRunner.class)
+@PrepareForTest(HawqClient.class)
+public class HawqClientTest {
+
+    @Mock
+    public Connection conn;
+    @Mock
+    public PreparedStatement preparedStatement;
+    @Mock
+    public ResultSet resultSet;
+
+    public HawqClient hawqClientSpy;
+
+    public HawqClient hawqClient;
+    private Map<String, String> connectionProperties;
+    private Map<String, List<String>> resources;
+
+    @Before
+    public void setUp() throws SQLException {
+        connectionProperties = new HashMap<>();
+        connectionProperties.put("hostname", "hostname");
+        connectionProperties.put("port", "5432");
+        connectionProperties.put("hostname", "hostname");
+        connectionProperties.put("username", "username");
+        connectionProperties.put("password", "password");
+
+        mockStatic(DriverManager.class);
+        suppress(constructor(BaseClient.class, String.class, Map.class));
+        hawqClient = new HawqClient(connectionProperties);
+
+        hawqClientSpy = PowerMockito.spy(hawqClient);
+
+        resources = new HashMap<>();
+        List<String> dbs = Arrays.asList("db1", "db2");
+        List<String> schemas = Arrays.asList("schema1", "schema2");
+        resources.put("database", dbs);
+        resources.put("schema", schemas);
+    }
+
+    @Test
+    public void testCheckConnection_Failure() throws Exception {
+        when(DriverManager.getConnection(anyString(), any(Properties.class))).thenReturn(conn);
+        when(conn.getCatalog()).thenReturn(null);
+        Map<String, Object> response = hawqClient.checkConnection(connectionProperties);
+        assertEquals(CONNECTION_FAILURE_MESSAGE, response.get("message"));
+        assertFalse((Boolean) response.get("connectivityStatus"));
+    }
+
+    @Test
+    public void testCheckConnection_Success() throws Exception {
+        when(DriverManager.getConnection(anyString(), any(Properties.class))).thenReturn(conn);
+        when(conn.getCatalog()).thenReturn("catalog");
+        Map<String, Object> response = hawqClient.checkConnection(connectionProperties);
+        assertEquals(CONNECTION_SUCCESSFUL_MESSAGE, response.get("message"));
+        assertTrue((Boolean) response.get("connectivityStatus"));
+    }
+
+    @Test
+    public void testCheckConnection_ThrowsSQLException_Failure() throws Exception {
+        when(DriverManager.getConnection(anyString(), any(Properties.class))).thenThrow(new SQLException("Failed to connect"));
+        Map<String, Object> response = hawqClient.checkConnection(connectionProperties);
+        assertEquals(CONNECTION_FAILURE_MESSAGE, response.get("message"));
+        assertEquals("Failed to connect", response.get("description"));
+        assertFalse((Boolean) response.get("connectivityStatus"));
+    }
+
+    @Test
+    public void testDatabaseList_Success() throws Exception {
+        when(conn.prepareStatement(DATABASE_LIST_QUERY)).thenReturn(preparedStatement);
+        when(preparedStatement.executeQuery()).thenReturn(resultSet);
+        when(resultSet.next()).thenReturn(true).thenReturn(true).thenReturn(false);
+        when(resultSet.getString(DATNAME)).thenReturn("db1").thenReturn("db2");
+        PowerMockito.doReturn(conn).when(hawqClientSpy, "getConnection", anyMap(), anyString());
+        assertEquals(Arrays.asList("db1", "db2"), hawqClientSpy.getDatabaseList("d"));
+    }
+
+    @Test
+    public void testTablespaceList_Success() throws Exception {
+        doReturn(Arrays.asList("tablespace1", "tablespace2")).when(hawqClientSpy, "queryHawq", "t", SPCNAME, TABLESPACE_LIST_QUERY, null);
+        assertEquals(Arrays.asList("tablespace1", "tablespace2"), hawqClientSpy.getTablespaceList("t"));
+    }
+
+    @Test
+    public void testProtocolList_Success() throws Exception {
+        List<String> expectedResult = Arrays.asList("protocol1", "protocol2", "pxf");
+        doReturn(Arrays.asList("protocol1", "protocol2")).when(hawqClientSpy, "queryHawq", "p", PTCNAME, PROTOCOL_LIST_QUERY, null);
+        List<String> result = hawqClientSpy.getProtocolList("p");
+        Collections.sort(result);
+        assertEquals(expectedResult, result);
+    }
+
+    @Test
+    public void testSchemaList_MultipleDb_Success() throws Exception {
+        doReturn(Arrays.asList("schema1")).when(hawqClientSpy, "queryHawq", "s", SCHEMA_NAME, SCHEMA_LIST_QUERY, "db1");
+        doReturn(Arrays.asList("schema2")).when(hawqClientSpy, "queryHawq", "s", SCHEMA_NAME, SCHEMA_LIST_QUERY, "db2");
+        List<String> result = hawqClientSpy.getSchemaList("s", resources);
+        Collections.sort(result);
+        assertEquals(Arrays.asList("schema1", "schema2"), result);
+    }
+
+    @Test
+    public void testSchemaList_AllDb_Success() throws Exception {
+        resources.put("database", Arrays.asList(WILDCARD));
+        doReturn(Arrays.asList("db1", "db2", "db3")).when(hawqClientSpy, "getDatabaseList", WILDCARD);
+        doReturn(Arrays.asList("schema1")).when(hawqClientSpy, "queryHawq", "s", SCHEMA_NAME, SCHEMA_LIST_QUERY, "db1");
+        doReturn(Arrays.asList("schema2")).when(hawqClientSpy, "queryHawq", "s", SCHEMA_NAME, SCHEMA_LIST_QUERY, "db2");
+        doReturn(Arrays.asList("schema3")).when(hawqClientSpy, "queryHawq", "s", SCHEMA_NAME, SCHEMA_LIST_QUERY, "db3");
+        List<String> result = hawqClientSpy.getSchemaList("s", resources);
+        Collections.sort(result);
+        assertEquals(Arrays.asList("schema1", "schema2", "schema3"), result);
+    }
+
+    @Test
+    public void testLanguageList_Success() throws Exception {
+        doReturn(Arrays.asList("language1")).when(hawqClientSpy, "queryHawq", "l", LANGUAGE_NAME, LANGUAGE_LIST_QUERY, "db1");
+        doReturn(Arrays.asList("language2")).when(hawqClientSpy, "queryHawq", "l", LANGUAGE_NAME, LANGUAGE_LIST_QUERY, "db2");
+        List<String> result = hawqClientSpy.getLanguageList("l", resources);
+        Collections.sort(result);
+        assertEquals(Arrays.asList("language1", "language2"), result);
+    }
+
+    @Test
+    public void testTableList_Success() throws Exception {
+        PowerMockito.doReturn(conn).when(hawqClientSpy, "getConnection", anyMap(), anyString());
+        when(conn.prepareStatement(TABLE_LIST_QUERY)).thenReturn(preparedStatement);
+        when(preparedStatement.executeQuery()).thenReturn(resultSet);
+        when(resultSet.next()).thenReturn(true).thenReturn(true).thenReturn(false);
+        when(resultSet.getString(RELNAME)).thenReturn("table1").thenReturn("table2");
+        when(resultSet.getString(NSPNAME)).thenReturn("schema1").thenReturn("schema2");
+        List<String> result = hawqClientSpy.getTableList("t", resources);
+        Collections.sort(result);
+        assertEquals(Arrays.asList("table1", "table2"), result);
+    }
+
+    @Test
+    public void testSequenceList_Success() throws Exception {
+        PowerMockito.doReturn(conn).when(hawqClientSpy, "getConnection", anyMap(), anyString());
+        when(conn.prepareStatement(SEQUENCE_LIST_QUERY)).thenReturn(preparedStatement);
+        when(preparedStatement.executeQuery()).thenReturn(resultSet);
+        when(resultSet.next()).thenReturn(true).thenReturn(true).thenReturn(false);
+        when(resultSet.getString(RELNAME)).thenReturn("seq1").thenReturn("seq2");
+        when(resultSet.getString(SCHEMANAME)).thenReturn("schema1").thenReturn("schema2");
+        List<String> result = hawqClientSpy.getSequenceList("s", resources);
+        Collections.sort(result);
+        assertEquals(Arrays.asList("seq1", "seq2"), result);
+    }
+
+    @Test
+    public void testSequenceList_SchemaFiltered_Success() throws Exception {
+        PowerMockito.doReturn(conn).when(hawqClientSpy, "getConnection", anyMap(), anyString());
+        when(conn.prepareStatement(SEQUENCE_LIST_QUERY)).thenReturn(preparedStatement);
+        when(preparedStatement.executeQuery()).thenReturn(resultSet);
+        when(resultSet.next()).thenReturn(true).thenReturn(true).thenReturn(false);
+        when(resultSet.getString(RELNAME)).thenReturn("seq1").thenReturn("seq2");
+        when(resultSet.getString(SCHEMANAME)).thenReturn("schema1").thenReturn("schema3");
+        assertEquals(Arrays.asList("seq1"), hawqClientSpy.getSequenceList("s", resources));
+    }
+
+    @Test
+    public void testFunctionList_Success() throws Exception {
+        PowerMockito.doReturn(conn).when(hawqClientSpy, "getConnection", anyMap(), anyString());
+        when(conn.prepareStatement(FUNCTION_LIST_QUERY)).thenReturn(preparedStatement);
+        when(preparedStatement.executeQuery()).thenReturn(resultSet);
+        when(resultSet.next()).thenReturn(true).thenReturn(true).thenReturn(false);
+        when(resultSet.getString(PRONAME)).thenReturn("fxn1").thenReturn("fxn2");
+        when(resultSet.getString(NSPNAME)).thenReturn("schema1").thenReturn("schema2");
+        assertEquals(Arrays.asList("fxn1", "fxn2"), hawqClientSpy.getFunctionList("f", resources));
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/admin-plugin/src/test/java/org/apache/hawq/ranger/service/RangerServiceHawqTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/admin-plugin/src/test/java/org/apache/hawq/ranger/service/RangerServiceHawqTest.java b/ranger-plugin/admin-plugin/src/test/java/org/apache/hawq/ranger/service/RangerServiceHawqTest.java
new file mode 100644
index 0000000..5264929
--- /dev/null
+++ b/ranger-plugin/admin-plugin/src/test/java/org/apache/hawq/ranger/service/RangerServiceHawqTest.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hawq.ranger.service;
+
+import org.apache.ranger.plugin.client.BaseClient;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.powermock.api.mockito.PowerMockito;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+import static org.apache.hawq.ranger.service.HawqClient.CONNECTION_SUCCESSFUL_MESSAGE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.powermock.api.mockito.PowerMockito.mockStatic;
+import static org.powermock.api.mockito.PowerMockito.when;
+import static org.powermock.api.support.membermodification.MemberMatcher.constructor;
+import static org.powermock.api.support.membermodification.MemberModifier.suppress;
+
+@RunWith(PowerMockRunner.class)
+@PrepareForTest(HawqClient.class)
+public class RangerServiceHawqTest {
+
+    private RangerServiceHawq service;
+    private Map<String, String> configs;
+
+    @Mock
+    HawqClient mockHawqClient;
+    @Mock
+    Connection conn;
+
+    @Before
+    public void setup() {
+        service = new RangerServiceHawq();
+        service.setServiceName("hawq");
+        service.setServiceType("hawq");
+
+        configs = new HashMap<>();
+        configs.put("username", "username");
+        configs.put("password", "password");
+        configs.put("hostname", "localhost");
+        configs.put("port", "5432");
+
+        service.setConfigs(configs);
+
+        mockStatic(DriverManager.class);
+    }
+
+    @Test
+    public void testValidateConfigSuccess() throws Exception {
+        HashMap<String, Object> result = new HashMap<>();
+        result.put("message", "ConnectionTest Successful");
+        result.put("description", "ConnectionTest Successful");
+        result.put("connectivityStatus", true);
+
+        suppress(constructor(BaseClient.class, String.class, Map.class));
+        PowerMockito.when(DriverManager.getConnection(anyString(), any(Properties.class))).thenReturn(conn);
+        when(conn.getCatalog()).thenReturn("catalog");
+
+        HashMap<String, Object> response = service.validateConfig();
+        assertEquals(CONNECTION_SUCCESSFUL_MESSAGE, response.get("description"));
+        assertEquals(CONNECTION_SUCCESSFUL_MESSAGE, response.get("message"));
+        assertTrue((Boolean) response.get("connectivityStatus"));
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/admin-plugin/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/ranger-plugin/admin-plugin/src/test/resources/log4j.properties b/ranger-plugin/admin-plugin/src/test/resources/log4j.properties
new file mode 100644
index 0000000..903f0b6
--- /dev/null
+++ b/ranger-plugin/admin-plugin/src/test/resources/log4j.properties
@@ -0,0 +1,34 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+##-- To prevent junits from cluttering the build output, all test runs send their output to the null appender by default
+log4j.appender.devnull=org.apache.log4j.varia.NullAppender
+#hawq.ranger.root.logger=FATAL,devnull
+
+##-- uncomment the following line during development/debugging to see debug messages emitted to the console during test runs
+hawq.ranger.root.logger=DEBUG,console
+log4j.rootLogger=${hawq.ranger.root.logger}
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/conf/ranger-servicedef-hawq.json
----------------------------------------------------------------------
diff --git a/ranger-plugin/conf/ranger-servicedef-hawq.json b/ranger-plugin/conf/ranger-servicedef-hawq.json
new file mode 100644
index 0000000..03005bc
--- /dev/null
+++ b/ranger-plugin/conf/ranger-servicedef-hawq.json
@@ -0,0 +1,287 @@
+{
+  "name": "hawq",
+  "implClass": "org.apache.hawq.ranger.service.RangerServiceHawq",
+  "label": "HAWQ",
+  "description": "HAWQ",
+  "guid": "1ebb27ee-549a-401d-85ab-818342ca54af",
+  "resources":
+  [
+    {
+      "itemId": 1,
+      "name": "database",
+      "type": "string",
+      "level": 10,
+      "parent": "",
+      "mandatory": true,
+      "lookupSupported": true,
+      "recursiveSupported": false,
+      "excludesSupported": true,
+      "Matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher",
+      "matcherOptions": {"wildCard":true, "ignoreCase":true},
+      "validationRegEx": "",
+      "validationMessage": "",
+      "uiHint": "",
+      "label": "Database",
+      "description": "HAWQ Database"
+    },
+    {
+      "itemId": 2,
+      "name": "schema",
+      "type": "string",
+      "level": 20,
+      "parent": "database",
+      "mandatory": true,
+      "lookupSupported": true,
+      "recursiveSupported": false,
+      "excludesSupported": true,
+      "Matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher",
+      "matcherOptions": {"wildCard":true, "ignoreCase":true},
+      "validationRegEx": "",
+      "validationMessage": "",
+      "uiHint": "",
+      "label": "Schema",
+      "description": "HAWQ Schema"
+    },
+    {
+      "itemId": 3,
+      "name": "table",
+      "type": "string",
+      "level": 30,
+      "parent": "schema",
+      "mandatory": true,
+      "lookupSupported": true,
+      "recursiveSupported": false,
+      "excludesSupported": true,
+      "Matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher",
+      "matcherOptions": {"wildCard":true, "ignoreCase":true},
+      "validationRegEx": "",
+      "validationMessage": "",
+      "uiHint": "",
+      "label": "Table",
+      "description": "HAWQ Table"
+    },
+    {
+      "itemId": 4,
+      "name": "sequence",
+      "type": "string",
+      "level": 30,
+      "parent": "schema",
+      "mandatory": true,
+      "lookupSupported": true,
+      "recursiveSupported": false,
+      "excludesSupported": true,
+      "Matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher",
+      "matcherOptions": {"wildCard":true, "ignoreCase":true},
+      "validationRegEx": "",
+      "validationMessage": "",
+      "uiHint": "",
+      "label": "Sequence",
+      "description": "HAWQ Sequence"
+    },
+    {
+      "itemId": 5,
+      "name": "function",
+      "type": "string",
+      "level": 30,
+      "parent": "schema",
+      "mandatory": true,
+      "lookupSupported": true,
+      "recursiveSupported": false,
+      "excludesSupported": true,
+      "Matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher",
+      "matcherOptions": {"wildCard":true, "ignoreCase":true},
+      "validationRegEx": "",
+      "validationMessage": "",
+      "uiHint": "",
+      "label": "Function",
+      "description": "HAWQ Function"
+    },
+    {
+      "itemId": 6,
+      "name": "language",
+      "type": "string",
+      "level": 20,
+      "parent": "database",
+      "mandatory": true,
+      "lookupSupported": true,
+      "recursiveSupported": false,
+      "excludesSupported": true,
+      "Matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher",
+      "matcherOptions": {"wildCard":true, "ignoreCase":true},
+      "validationRegEx": "",
+      "validationMessage": "",
+      "uiHint": "",
+      "label": "Language",
+      "description": "HAWQ Language"
+    },
+    {
+      "itemId": 7,
+      "name": "tablespace",
+      "type": "string",
+      "level": 10,
+      "parent": "",
+      "mandatory": true,
+      "lookupSupported": true,
+      "recursiveSupported": false,
+      "excludesSupported": true,
+      "Matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher",
+      "matcherOptions": {"wildCard":true, "ignoreCase":true},
+      "validationRegEx": "",
+      "validationMessage": "",
+      "uiHint": "",
+      "label": "Tablespace",
+      "description": "HAWQ Tablespace"
+    },
+    {
+      "itemId": 8,
+      "name": "protocol",
+      "type": "string",
+      "level": 10,
+      "parent": "",
+      "mandatory": true,
+      "lookupSupported": true,
+      "recursiveSupported": false,
+      "excludesSupported": true,
+      "Matcher": "org.apache.ranger.plugin.resourcematcher.RangerDefaultResourceMatcher",
+      "matcherOptions": {"wildCard":true, "ignoreCase":true},
+      "validationRegEx": "",
+      "validationMessage": "",
+      "uiHint": "",
+      "label": "Protocol",
+      "description": "HAWQ Protocol"
+    }
+  ],
+  "accessTypes":
+  [
+    {
+      "itemId": 1,
+      "name": "select",
+      "label": "select"
+    },
+    {
+      "itemId": 2,
+      "name": "insert",
+      "label": "insert"
+    },
+    {
+      "itemId": 3,
+      "name": "update",
+      "label": "update"
+    },
+    {
+      "itemId": 4,
+      "name": "delete",
+      "label": "delete"
+    },
+    {
+      "itemId": 5,
+      "name": "references",
+      "label": "references"
+    },
+    {
+      "itemId": 6,
+      "name": "usage",
+      "label": "usage"
+    },
+    {
+      "itemId": 7,
+      "name": "create",
+      "label": "create"
+    },
+    {
+      "itemId": 8,
+      "name": "connect",
+      "label": "connect"
+    },
+    {
+      "itemId": 9,
+      "name": "execute",
+      "label": "execute"
+    },
+    {
+      "itemId": 10,
+      "name": "temp",
+      "label": "temp"
+    },
+    {
+      "itemId": 11,
+      "name": "create-schema",
+      "label": "create-schema"
+    },
+    {
+      "itemId": 12,
+      "name": "usage-schema",
+      "label": "usage-schema"
+    },
+    {
+      "itemId": 13,
+      "name": "all",
+      "label": "All",
+      "impliedGrants": [
+        "select",
+        "insert",
+        "update",
+        "delete",
+        "references",
+        "usage",
+        "create",
+        "connect",
+        "execute",
+        "temp",
+        "create-schema",
+        "usage-schema"
+      ]
+    }
+  ],
+
+  "configs":
+  [
+    {
+      "itemId": 1,
+      "name": "username",
+      "type": "string",
+      "mandatory": true,
+      "validationRegEx": "",
+      "validationMessage": "",
+      "uiHint": "",
+      "label": "HAWQ User Name",
+      "defaultValue": "gpadmin"
+    },
+    {
+      "itemId": 2,
+      "name": "password",
+      "type": "password",
+      "mandatory": true,
+      "validationRegEx": "",
+      "validationMessage": "",
+      "uiHint": "",
+      "label": "HAWQ User Password"
+    },
+    {
+      "itemId": 3,
+      "name": "hostname",
+      "type": "string",
+      "mandatory": true,
+      "validationRegEx": "",
+      "validationMessage": "",
+      "uiHint": "",
+      "label": "HAWQ Master Hostname"
+    },
+    {
+      "itemId": 4,
+      "name": "port",
+      "type": "int",
+      "mandatory": true,
+      "validationRegEx": "",
+      "validationMessage": "",
+      "uiHint": "",
+      "label": "HAWQ Master Port",
+      "defaultValue": 5432
+    }
+  ],
+  "enums": [],
+  "contextEnrichers": [],
+  "policyConditions": [],
+  "dataMaskDef": {},
+  "rowFilterDef": {}
+}
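
The service definition above gives the HAWQ plugin its resource hierarchy (database > schema > table/sequence/function, language under database, tablespace and protocol at the top level) and the access types a policy can grant, with "all" implying the rest. As a rough illustration of how a plugin consumes such a definition, the following is a minimal sketch built on the standard Ranger plugin-common classes; the service/app name "hawq", the user, and the resource values are illustrative assumptions rather than content of this commit, and class names should be checked against the Ranger version in use.

    import org.apache.ranger.plugin.policyengine.RangerAccessRequestImpl;
    import org.apache.ranger.plugin.policyengine.RangerAccessResourceImpl;
    import org.apache.ranger.plugin.policyengine.RangerAccessResult;
    import org.apache.ranger.plugin.service.RangerBasePlugin;

    public class HawqAccessCheckSketch {
        public static void main(String[] args) {
            // Plugin instance for the "hawq" service type defined above (app id is an assumption).
            RangerBasePlugin plugin = new RangerBasePlugin("hawq", "hawq");
            plugin.init();

            // Resource keys mirror the hierarchy declared in ranger-servicedef-hawq.json.
            RangerAccessResourceImpl resource = new RangerAccessResourceImpl();
            resource.setValue("database", "east");
            resource.setValue("schema", "japan");
            resource.setValue("table", "orders");   // illustrative table name

            RangerAccessRequestImpl request = new RangerAccessRequestImpl();
            request.setResource(resource);
            request.setAccessType("select");        // one of the accessTypes listed above
            request.setUser("gpadmin");

            RangerAccessResult result = plugin.isAccessAllowed(request);
            System.out.println("allowed = " + (result != null && result.getIsAllowed()));
        }
    }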

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/conf/tomcat-server.xml
----------------------------------------------------------------------
diff --git a/ranger-plugin/conf/tomcat-server.xml b/ranger-plugin/conf/tomcat-server.xml
new file mode 100644
index 0000000..09f9088
--- /dev/null
+++ b/ranger-plugin/conf/tomcat-server.xml
@@ -0,0 +1,60 @@
+<?xml version='1.0' encoding='utf-8'?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<!-- Note:  A "Server" is not itself a "Container", so you may not
+     define subcomponents such as "Valves" at this level.
+     Documentation at /docs/config/server.html
+ -->
+<Server port="8005" shutdown="SHUTDOWN">
+
+  <Listener className="org.apache.catalina.core.AprLifecycleListener" SSLEngine="on" />
+  <Listener className="org.apache.catalina.core.JasperListener" />
+  <Listener className="org.apache.catalina.core.JreMemoryLeakPreventionListener" />
+  <Listener className="org.apache.catalina.mbeans.ServerLifecycleListener" />
+  <Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener" />
+
+  <GlobalNamingResources>
+    <Resource name="UserDatabase" auth="Container"
+              type="org.apache.catalina.UserDatabase"
+              description="User database that can be updated and saved"
+              factory="org.apache.catalina.users.MemoryUserDatabaseFactory"
+              pathname="conf/tomcat-users.xml" />
+  </GlobalNamingResources>
+
+  <Service name="Catalina">
+
+    <Connector port="${http.port}" protocol="HTTP/1.1"
+               connectionTimeout="20000"
+               redirectPort="8443" />
+
+    <Engine name="Catalina" defaultHost="localhost">
+
+      <Realm className="org.apache.catalina.realm.UserDatabaseRealm"
+             resourceName="UserDatabase"/>
+
+      <Host name="${http.host}"  appBase="webapps"
+            unpackWARs="true" autoDeploy="true"
+            xmlValidation="false" xmlNamespaceAware="false">
+
+        <Context path="/rps"
+                 docBase="/usr/local/hawq/ranger/plugin-service/webapps/rps"
+                 reloadable="false" debug="0" cookies="false"></Context>
+
+      </Host>
+    </Engine>
+  </Service>
+</Server>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/admin/pom.xml
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/admin/pom.xml b/ranger-plugin/integration/admin/pom.xml
new file mode 100644
index 0000000..0a81941
--- /dev/null
+++ b/ranger-plugin/integration/admin/pom.xml
@@ -0,0 +1,70 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>org.apache.hawq</groupId>
+    <artifactId>ranger-plugin-integration-admin</artifactId>
+    <packaging>jar</packaging>
+    <name>HAWQ Ranger Plugin - Integration Tests for Admin Plugin</name>
+    <description>HAWQ Ranger Plugin - Integration Tests for Admin Plugin</description>
+
+    <parent>
+        <groupId>org.apache.hawq</groupId>
+        <artifactId>ranger-plugin-integration</artifactId>
+        <version>2.1.0.0</version>
+        <relativePath>..</relativePath>
+    </parent>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.hawq</groupId>
+            <artifactId>ranger-plugin-admin</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.ranger</groupId>
+            <artifactId>ranger-plugins-common</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>log4j</groupId>
+            <artifactId>log4j</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>commons-logging</groupId>
+            <artifactId>commons-logging</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>postgresql</groupId>
+            <artifactId>postgresql</artifactId>
+        </dependency>
+
+        <!-- Test Dependencies -->
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+        </dependency>
+    </dependencies>
+
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListDatabasesTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListDatabasesTest.java b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListDatabasesTest.java
new file mode 100644
index 0000000..67eecdd
--- /dev/null
+++ b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListDatabasesTest.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.admin;
+
+import com.google.common.collect.Sets;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Set;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class ListDatabasesTest extends LookupTestBase {
+
+    private static final Set<String> DATABASES = Sets.newHashSet("postgres", "east", "west", "noschema_db");
+
+    @Test
+    public void testListDatabases_All() throws Exception {
+        List<String> result = service.lookupResource(getContext("database", "*"));
+        assertEquals(4, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet(DATABASES)));
+    }
+
+    @Test
+    public void testListDatabases_FilteredPresent() throws Exception {
+        List<String> result = service.lookupResource(getContext("database", "e"));
+        assertEquals(1, result.size());
+        assertEquals("east", result.get(0));
+    }
+
+    @Test
+    public void testListDatabases_FilteredAbsent() throws Exception {
+        List<String> result = service.lookupResource(getContext("database", "z"));
+        assertTrue(result.isEmpty());
+    }
+
+}
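
These lookup tests all construct their input through a getContext(...) helper inherited from LookupTestBase, which is not part of this excerpt. A plausible sketch of what such a helper builds, assuming Ranger's ResourceLookupContext POJO from plugin-common (the package and setter names are assumptions to verify against the Ranger version in use), is:

    import org.apache.ranger.plugin.service.ResourceLookupContext;

    import java.util.Collections;
    import java.util.List;
    import java.util.Map;

    public final class LookupContextSketch {
        // Hypothetical stand-in for LookupTestBase.getContext(resourceName, userInput, resources).
        public static ResourceLookupContext getContext(String resourceName, String userInput,
                                                        Map<String, List<String>> resources) {
            ResourceLookupContext context = new ResourceLookupContext();
            context.setResourceName(resourceName);   // e.g. "database", "function", "protocol"
            context.setUserInput(userInput);         // filter typed in the Ranger UI, e.g. "*" or "e"
            context.setResources(resources);         // parent resources already selected; may be empty
            return context;
        }

        // Two-argument form used by the database and protocol tests.
        public static ResourceLookupContext getContext(String resourceName, String userInput) {
            return getContext(resourceName, userInput, Collections.<String, List<String>>emptyMap());
        }
    }

The service under test is expected to return only the resource names matching the userInput filter within the selected parent resources, which is what the assertions above and in the following tests check.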

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListFunctionsTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListFunctionsTest.java b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListFunctionsTest.java
new file mode 100644
index 0000000..7b68d78
--- /dev/null
+++ b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListFunctionsTest.java
@@ -0,0 +1,250 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.admin;
+
+import com.google.common.collect.Sets;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.Arrays;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class ListFunctionsTest extends LookupTestBase {
+
+    private Map<String, List<String>> resources;
+
+    @Before
+    public void setUp() {
+        resources = new HashMap<>();
+    }
+
+    @Test
+    public void testListFunctions_NoSchemaDb_AllSchemas_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("noschema_db"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("function", "*", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListFunctions_SingleDb_SingleSchema_AllFilter_NoFunctions() throws Exception {
+        resources.put("database", Arrays.asList("west"));
+        resources.put("schema", Arrays.asList("jamaica"));
+        List<String> result = service.lookupResource(getContext("function", "*", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListFunctions_SingleDb_SingleSchema_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("japan"));
+        List<String> result = service.lookupResource(getContext("function", "*", resources));
+        assertEquals(2, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("eat", "stand")));
+    }
+
+    @Test
+    public void testListFunctions_SingleDb_TwoSchemas_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("common", "japan"));
+        List<String> result = service.lookupResource(getContext("function", "*", resources));
+        assertEquals(3, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("eat", "sleep", "stand")));
+    }
+
+    @Test
+    public void testListFunctions_SingleDb_AllSchemas_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("function", "*", resources));
+        assertEquals(3, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("eat", "sleep", "stand")));
+    }
+
+    @Test
+    public void testListFunctions_TwoDb_CommonSchema_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("common"));
+        List<String> result = service.lookupResource(getContext("function", "*", resources));
+        assertEquals(2, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("eat", "sleep")));
+    }
+
+    @Test
+    public void testListFunctions_TwoDb_SingleSchema_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("japan"));
+        List<String> result = service.lookupResource(getContext("function", "*", resources));
+        assertEquals(2, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("eat", "stand")));
+    }
+
+    @Test
+    public void testListFunctions_TwoDb_AllSchemas_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("function", "*", resources));
+        assertEquals(4, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("eat", "sleep", "stand", "smile")));
+    }
+
+    @Test
+    public void testListFunctions_AllDb_AllSchemas_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("*"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("function", "*", resources));
+        assertEquals(4, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("eat", "sleep", "stand", "smile")));
+    }
+
+    @Test
+    public void testListFunctions_SingleDb_SingleSchema_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("japan"));
+        List<String> result = service.lookupResource(getContext("function", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListFunctions_SingleDb_TwoSchemas_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("common", "japan"));
+        List<String> result = service.lookupResource(getContext("function", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListFunctions_SingleDb_AllSchemas_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("function", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListFunctions_TwoDbs_CommonSchema_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("common"));
+        List<String> result = service.lookupResource(getContext("function", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListFunctions_TwoDbs_SingleSchema_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("japan"));
+        List<String> result = service.lookupResource(getContext("function", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListFunctions_TwoDbs_AllSchemas_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("function", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListFunctions_AllDbs_AllSchemas_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("*"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("function", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListFunctions_SingleDb_SingleSchema_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("japan"));
+        List<String> result = service.lookupResource(getContext("function", "s", resources));
+        assertEquals(1, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("stand")));
+    }
+
+    @Test
+    public void testListFunctions_SingleDb_TwoSchemas_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("common", "japan"));
+        List<String> result = service.lookupResource(getContext("function", "s", resources));
+        assertEquals(2, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("sleep", "stand")));
+    }
+
+    @Test
+    public void testListFunctions_SingleDb_AllSchemas_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("function", "s", resources));
+        assertEquals(2, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("sleep", "stand")));
+    }
+
+    @Test
+    public void testListFunctions_SingleDb_AllSchemas_FilteredPresent2() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("function", "e", resources));
+        assertEquals(1, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("eat")));
+    }
+
+    @Test
+    public void testListFunctions_TwoDbs_CommonSchema_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("common"));
+        List<String> result = service.lookupResource(getContext("function", "e", resources));
+        assertEquals(1, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("eat")));
+    }
+
+    @Test
+    public void testListFunctions_TwoDbs_SingleSchema_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("japan"));
+        List<String> result = service.lookupResource(getContext("function", "s", resources));
+        assertEquals(1, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("stand")));
+    }
+
+    @Test
+    public void testListFunctions_TwoDbs_AllSchemas_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("function", "s", resources));
+        assertEquals(3, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("sleep", "stand", "smile")));
+    }
+
+    @Test
+    public void testListFunctions_AllDbs_AllSchemas_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("*"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("function", "s", resources));
+        assertEquals(3, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("sleep", "stand", "smile")));
+    }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListLanguagesTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListLanguagesTest.java b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListLanguagesTest.java
new file mode 100644
index 0000000..726ef7e
--- /dev/null
+++ b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListLanguagesTest.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.admin;
+
+import com.google.common.collect.Sets;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Set;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.Arrays;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class ListLanguagesTest extends LookupTestBase {
+
+    private static final Set<String> DEFAULT_LANGUAGES = Sets.newHashSet("c", "internal", "plpgsql", "sql");
+    private static final Set<String> EAST_LANGUAGES = Sets.newHashSet("langdbeast", "c", "internal", "plpgsql", "sql");
+
+    private Map<String, List<String>> resources = new HashMap<>();
+
+    @Test
+    public void testListLanguage_NoResources() throws Exception {
+        resources.put("database", Arrays.asList("noschema_db"));
+        List<String> result = service.lookupResource(getContext("language", "*", resources));
+        assertEquals(4, result.size());
+        assertTrue(Sets.newHashSet(result).equals(DEFAULT_LANGUAGES));
+    }
+
+    @Test
+    public void testListLanguages_SingleDb_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        List<String> result = service.lookupResource(getContext("language", "*", resources));
+        assertEquals(5, result.size());
+        assertTrue(Sets.newHashSet(result).equals(EAST_LANGUAGES));
+    }
+
+    @Test
+    public void testListLanguages_TwoDb_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        List<String> result = service.lookupResource(getContext("language", "*", resources));
+        assertEquals(6, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("langdbeast", "langdbwest", "c", "internal", "plpgsql", "sql")));
+    }
+
+    @Test
+    public void testListLanguages_AllDb_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("language", "*", resources));
+        assertEquals(6, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("langdbeast", "langdbwest", "c", "internal", "plpgsql", "sql")));
+    }
+
+    @Test
+    public void testListLanguages_SingleDb_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        List<String> result = service.lookupResource(getContext("language", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListLanguages_TwoDb_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        List<String> result = service.lookupResource(getContext("language", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListLanguages_AllDb_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("language", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListLanguages_SingleDb_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        List<String> result = service.lookupResource(getContext("language", "l", resources));
+        assertEquals(1, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("langdbeast")));
+    }
+
+    @Test
+    public void testListLanguages_TwoDb_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        List<String> result = service.lookupResource(getContext("language", "l", resources));
+        assertEquals(2, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("langdbeast", "langdbwest")));
+    }
+
+    @Test
+    public void testListLanguages_AllDb_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("language", "l", resources));
+        assertEquals(2, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("langdbeast", "langdbwest")));
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListProtocolsTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListProtocolsTest.java b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListProtocolsTest.java
new file mode 100644
index 0000000..d46febc
--- /dev/null
+++ b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListProtocolsTest.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.admin;
+
+import com.google.common.collect.Sets;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Set;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class ListProtocolsTest extends LookupTestBase {
+
+    private static final Set<String> PROTOCOLS = Sets.newHashSet("file", "ftp", "gpfdist", "gpfdists", "http", "pxf");
+
+    @Test
+    public void testListProtocols_All() throws Exception {
+        List<String> result = service.lookupResource(getContext("protocol", "*"));
+        assertEquals(6, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet(PROTOCOLS)));
+    }
+
+    @Test
+    public void testListProtocols_FilteredPresent() throws Exception {
+        List<String> result = service.lookupResource(getContext("protocol", "h"));
+        assertEquals(1, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("http")));
+    }
+
+    @Test
+    public void testListProtocols_FilteredAbsent() throws Exception {
+        List<String> result = service.lookupResource(getContext("protocol", "z"));
+        assertTrue(result.isEmpty());
+    }
+
+}



[18/50] [abbrv] incubator-hawq git commit: HAWQ-1248. Merge Dockerfiles for HAWQ Dev into HAWQ code base.

Posted by es...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/log4j.properties
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/log4j.properties b/contrib/hawq-docker/centos6-docker/hawq-test/conf/log4j.properties
new file mode 100644
index 0000000..c901ab1
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/log4j.properties
@@ -0,0 +1,291 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+# Null Appender
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Rolling File Appender - cap space usage at 5gb.
+#
+hadoop.log.maxfilesize=256MB
+hadoop.log.maxbackupindex=20
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
+log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this 
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# HDFS block state change log from block manager
+#
+# Uncomment the following to suppress normal block state change
+# messages from BlockManager in NameNode.
+#log4j.logger.BlockStateChange=WARN
+
+#
+#Security appender
+#
+hadoop.security.logger=INFO,NullAppender
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth-${user.name}.audit
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender 
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# Daily Rolling Security appender
+#
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender 
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+#
+# hadoop configuration logging
+#
+
+# Uncomment the following line to turn off configuration deprecation warnings.
+# log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,NullAppender
+hdfs.audit.log.maxfilesize=256MB
+hdfs.audit.log.maxbackupindex=20
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
+log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
+
+#
+# NameNode metrics logging.
+# The default is to retain two namenode-metrics.log files up to 64MB each.
+#
+namenode.metrics.logger=INFO,NullAppender
+log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}
+log4j.additivity.NameNodeMetricsLog=false
+log4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender
+log4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log
+log4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
+log4j.appender.NNMETRICSRFA.MaxBackupIndex=1
+log4j.appender.NNMETRICSRFA.MaxFileSize=64MB
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,NullAppender
+mapred.audit.log.maxfilesize=256MB
+mapred.audit.log.maxbackupindex=20
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}
+log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}
+
+# Custom Logging levels
+
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=DEBUG
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+# AWS SDK & S3A FileSystem
+log4j.logger.com.amazonaws=ERROR
+log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
+log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+#
+# Job Summary Appender 
+#
+# Use following logger to send summary to separate file defined by 
+# hadoop.mapreduce.jobsummary.log.file :
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+# 
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+hadoop.mapreduce.jobsummary.log.maxfilesize=256MB
+hadoop.mapreduce.jobsummary.log.maxbackupindex=20
+log4j.appender.JSA=org.apache.log4j.RollingFileAppender
+log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
+log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize}
+log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex}
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
+log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
+
+#
+# Yarn ResourceManager Application Summary Log 
+#
+# Set the ResourceManager summary log filename
+yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
+# Set the ResourceManager summary log level and appender
+yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
+#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
+
+# To enable AppSummaryLogging for the RM, 
+# set yarn.server.resourcemanager.appsummary.logger to 
+# <LEVEL>,RMSUMMARY in hadoop-env.sh
+
+# Appender for ResourceManager Application Summary Log
+# Requires the following properties to be set
+#    - hadoop.log.dir (Hadoop Log directory)
+#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
+#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
+
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
+log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
+log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
+log4j.appender.RMSUMMARY.MaxFileSize=256MB
+log4j.appender.RMSUMMARY.MaxBackupIndex=20
+log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
+log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+
+# HS audit log configs
+#mapreduce.hs.audit.logger=INFO,HSAUDIT
+#log4j.logger.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=${mapreduce.hs.audit.logger}
+#log4j.additivity.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=false
+#log4j.appender.HSAUDIT=org.apache.log4j.DailyRollingFileAppender
+#log4j.appender.HSAUDIT.File=${hadoop.log.dir}/hs-audit.log
+#log4j.appender.HSAUDIT.layout=org.apache.log4j.PatternLayout
+#log4j.appender.HSAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+#log4j.appender.HSAUDIT.DatePattern=.yyyy-MM-dd
+
+# Http Server Request Logs
+#log4j.logger.http.requests.namenode=INFO,namenoderequestlog
+#log4j.appender.namenoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.namenoderequestlog.Filename=${hadoop.log.dir}/jetty-namenode-yyyy_mm_dd.log
+#log4j.appender.namenoderequestlog.RetainDays=3
+
+#log4j.logger.http.requests.datanode=INFO,datanoderequestlog
+#log4j.appender.datanoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.datanoderequestlog.Filename=${hadoop.log.dir}/jetty-datanode-yyyy_mm_dd.log
+#log4j.appender.datanoderequestlog.RetainDays=3
+
+#log4j.logger.http.requests.resourcemanager=INFO,resourcemanagerrequestlog
+#log4j.appender.resourcemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.resourcemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-resourcemanager-yyyy_mm_dd.log
+#log4j.appender.resourcemanagerrequestlog.RetainDays=3
+
+#log4j.logger.http.requests.jobhistory=INFO,jobhistoryrequestlog
+#log4j.appender.jobhistoryrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.jobhistoryrequestlog.Filename=${hadoop.log.dir}/jetty-jobhistory-yyyy_mm_dd.log
+#log4j.appender.jobhistoryrequestlog.RetainDays=3
+
+#log4j.logger.http.requests.nodemanager=INFO,nodemanagerrequestlog
+#log4j.appender.nodemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-yyyy_mm_dd.log
+#log4j.appender.nodemanagerrequestlog.RetainDays=3
+
+# Appender for viewing information for errors and warnings
+yarn.ewma.cleanupInterval=300
+yarn.ewma.messageAgeLimitSeconds=86400
+yarn.ewma.maxUniqueMessages=250
+log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
+log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
+log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
+log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-env.cmd
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-env.cmd b/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-env.cmd
new file mode 100644
index 0000000..0d39526
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-env.cmd
@@ -0,0 +1,20 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements.  See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License.  You may obtain a copy of the License at
+@rem
+@rem     http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+set HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000
+
+set HADOOP_MAPRED_ROOT_LOGGER=%HADOOP_LOGLEVEL%,RFA
+

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-env.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-env.sh b/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-env.sh
new file mode 100644
index 0000000..6be1e27
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-env.sh
@@ -0,0 +1,27 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+
+export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000
+
+export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
+
+#export HADOOP_JOB_HISTORYSERVER_OPTS=
+#export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.
+#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.
+#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.
+#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default
+#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-queues.xml.template
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-queues.xml.template b/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-queues.xml.template
new file mode 100644
index 0000000..ce6cd20
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-queues.xml.template
@@ -0,0 +1,92 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<!-- This is the template for queue configuration. The format supports nesting of
+     queues within queues - a feature called hierarchical queues. All queues are
+     defined within the 'queues' tag which is the top level element for this
+     XML document. The queue acls configured here for different queues are
+     checked for authorization only if the configuration property
+     mapreduce.cluster.acls.enabled is set to true. -->
+<queues>
+
+  <!-- Configuration for a queue is specified by defining a 'queue' element. -->
+  <queue>
+
+    <!-- Name of a queue. Queue name cannot contain a ':'  -->
+    <name>default</name>
+
+    <!-- properties for a queue, typically used by schedulers,
+    can be defined here -->
+    <properties>
+    </properties>
+
+	<!-- State of the queue. If running, the queue will accept new jobs.
+         If stopped, the queue will not accept new jobs. -->
+    <state>running</state>
+
+    <!-- Specifies the ACLs to check for submitting jobs to this queue.
+         If set to '*', it allows all users to submit jobs to the queue.
+         If set to ' '(i.e. space), no user will be allowed to do this
+         operation. The default value for any queue acl is ' '.
+         For specifying a list of users and groups the format to use is
+         user1,user2 group1,group2
+
+         It is only used if authorization is enabled in Map/Reduce by setting
+         the configuration property mapreduce.cluster.acls.enabled to true.
+
+         Irrespective of this ACL configuration, the user who started the
+         cluster and cluster administrators configured via
+         mapreduce.cluster.administrators can do this operation. -->
+    <acl-submit-job> </acl-submit-job>
+
+    <!-- Specifies the ACLs to check for viewing and modifying jobs in this
+         queue. Modifications include killing jobs, tasks of jobs or changing
+         priorities.
+         If set to '*', it allows all users to view, modify jobs of the queue.
+         If set to ' '(i.e. space), no user will be allowed to do this
+         operation.
+         For specifying a list of users and groups the format to use is
+         user1,user2 group1,group2
+
+         It is only used if authorization is enabled in Map/Reduce by setting
+         the configuration property mapreduce.cluster.acls.enabled to true.
+
+         Irrespective of this ACL configuration, the user who started the
+         cluster  and cluster administrators configured via
+         mapreduce.cluster.administrators can do the above operations on all
+         the jobs in all the queues. The job owner can do all the above
+         operations on his/her job irrespective of this ACL configuration. -->
+    <acl-administer-jobs> </acl-administer-jobs>
+  </queue>
+
+  <!-- Here is a sample of a hierarchical queue configuration
+       where q2 is a child of q1. In this example, q2 is a leaf level
+       queue as it has no queues configured within it. Currently, ACLs
+       and state are only supported for the leaf level queues.
+       Note also the usage of properties for the queue q2.
+  <queue>
+    <name>q1</name>
+    <queue>
+      <name>q2</name>
+      <properties>
+        <property key="capacity" value="20"/>
+        <property key="user-limit" value="30"/>
+      </properties>
+    </queue>
+  </queue>
+ -->
+</queues>
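
A hedged usage sketch (not part of the diff): once this template is installed as
mapred-queues.xml and mapreduce.cluster.acls.enabled is true, the effective queues
and the ACL operations granted to the current user can be listed from any node
with the Hadoop client installed:

  $ mapred queue -list       # queue names, state and scheduling information
  $ mapred queue -showacls   # operations (submit-job, administer-jobs) allowed for this user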

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-site.xml.template
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-site.xml.template b/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-site.xml.template
new file mode 100644
index 0000000..761c352
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-site.xml.template
@@ -0,0 +1,21 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/slaves
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/slaves b/contrib/hawq-docker/centos6-docker/hawq-test/conf/slaves
new file mode 100644
index 0000000..2fbb50c
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/slaves
@@ -0,0 +1 @@
+localhost

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/ssl-client.xml.example
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/ssl-client.xml.example b/contrib/hawq-docker/centos6-docker/hawq-test/conf/ssl-client.xml.example
new file mode 100644
index 0000000..a50dce4
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/ssl-client.xml.example
@@ -0,0 +1,80 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+
+<property>
+  <name>ssl.client.truststore.location</name>
+  <value></value>
+  <description>Truststore to be used by clients like distcp. Must be
+  specified.
+  </description>
+</property>
+
+<property>
+  <name>ssl.client.truststore.password</name>
+  <value></value>
+  <description>Optional. Default value is "".
+  </description>
+</property>
+
+<property>
+  <name>ssl.client.truststore.type</name>
+  <value>jks</value>
+  <description>Optional. The keystore file format, default value is "jks".
+  </description>
+</property>
+
+<property>
+  <name>ssl.client.truststore.reload.interval</name>
+  <value>10000</value>
+  <description>Truststore reload check interval, in milliseconds.
+  Default value is 10000 (10 seconds).
+  </description>
+</property>
+
+<property>
+  <name>ssl.client.keystore.location</name>
+  <value></value>
+  <description>Keystore to be used by clients like distcp. Must be
+  specified.
+  </description>
+</property>
+
+<property>
+  <name>ssl.client.keystore.password</name>
+  <value></value>
+  <description>Optional. Default value is "".
+  </description>
+</property>
+
+<property>
+  <name>ssl.client.keystore.keypassword</name>
+  <value></value>
+  <description>Optional. Default value is "".
+  </description>
+</property>
+
+<property>
+  <name>ssl.client.keystore.type</name>
+  <value>jks</value>
+  <description>Optional. The keystore file format, default value is "jks".
+  </description>
+</property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/ssl-server.xml.example
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/ssl-server.xml.example b/contrib/hawq-docker/centos6-docker/hawq-test/conf/ssl-server.xml.example
new file mode 100644
index 0000000..02d300c
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/ssl-server.xml.example
@@ -0,0 +1,78 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+
+<property>
+  <name>ssl.server.truststore.location</name>
+  <value></value>
+  <description>Truststore to be used by NN and DN. Must be specified.
+  </description>
+</property>
+
+<property>
+  <name>ssl.server.truststore.password</name>
+  <value></value>
+  <description>Optional. Default value is "".
+  </description>
+</property>
+
+<property>
+  <name>ssl.server.truststore.type</name>
+  <value>jks</value>
+  <description>Optional. The keystore file format, default value is "jks".
+  </description>
+</property>
+
+<property>
+  <name>ssl.server.truststore.reload.interval</name>
+  <value>10000</value>
+  <description>Truststore reload check interval, in milliseconds.
+  Default value is 10000 (10 seconds).
+  </description>
+</property>
+
+<property>
+  <name>ssl.server.keystore.location</name>
+  <value></value>
+  <description>Keystore to be used by NN and DN. Must be specified.
+  </description>
+</property>
+
+<property>
+  <name>ssl.server.keystore.password</name>
+  <value></value>
+  <description>Must be specified.
+  </description>
+</property>
+
+<property>
+  <name>ssl.server.keystore.keypassword</name>
+  <value></value>
+  <description>Must be specified.
+  </description>
+</property>
+
+<property>
+  <name>ssl.server.keystore.type</name>
+  <value>jks</value>
+  <description>Optional. The keystore file format, default value is "jks".
+  </description>
+</property>
+
+</configuration>
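
A minimal sketch for producing the keystore this example points at, assuming the
JDK keytool is available; the path /etc/hadoop/conf/server.keystore, the alias and
the password "changeit" are placeholders for whatever ends up in
ssl.server.keystore.location / ssl.server.keystore.password:

  $ keytool -genkeypair -alias $(hostname -f) -keyalg RSA -keysize 2048 \
        -dname "CN=$(hostname -f)" \
        -keystore /etc/hadoop/conf/server.keystore \
        -storepass changeit -keypass changeit
  $ keytool -exportcert -alias $(hostname -f) -keystore /etc/hadoop/conf/server.keystore \
        -storepass changeit -file server.cer   # certificate for the client truststore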

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/yarn-env.cmd
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/yarn-env.cmd b/contrib/hawq-docker/centos6-docker/hawq-test/conf/yarn-env.cmd
new file mode 100644
index 0000000..74da35b
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/yarn-env.cmd
@@ -0,0 +1,60 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements.  See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License.  You may obtain a copy of the License at
+@rem
+@rem     http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem User for YARN daemons
+if not defined HADOOP_YARN_USER (
+  set HADOOP_YARN_USER=%yarn%
+)
+
+if not defined YARN_CONF_DIR (
+  set YARN_CONF_DIR=%HADOOP_YARN_HOME%\conf
+)
+
+if defined YARN_HEAPSIZE (
+  @rem echo run with Java heapsize %YARN_HEAPSIZE%
+  set JAVA_HEAP_MAX=-Xmx%YARN_HEAPSIZE%m
+)
+
+if not defined YARN_LOG_DIR (
+  set YARN_LOG_DIR=%HADOOP_YARN_HOME%\logs
+)
+
+if not defined YARN_LOGFILE (
+  set YARN_LOGFILE=yarn.log
+)
+
+@rem default policy file for service-level authorization
+if not defined YARN_POLICYFILE (
+  set YARN_POLICYFILE=hadoop-policy.xml
+)
+
+if not defined YARN_ROOT_LOGGER (
+  set YARN_ROOT_LOGGER=%HADOOP_LOGLEVEL%,console
+)
+
+set YARN_OPTS=%YARN_OPTS% -Dhadoop.log.dir=%YARN_LOG_DIR%
+set YARN_OPTS=%YARN_OPTS% -Dyarn.log.dir=%YARN_LOG_DIR%
+set YARN_OPTS=%YARN_OPTS% -Dhadoop.log.file=%YARN_LOGFILE%
+set YARN_OPTS=%YARN_OPTS% -Dyarn.log.file=%YARN_LOGFILE%
+set YARN_OPTS=%YARN_OPTS% -Dyarn.home.dir=%HADOOP_YARN_HOME%
+set YARN_OPTS=%YARN_OPTS% -Dyarn.id.str=%YARN_IDENT_STRING%
+set YARN_OPTS=%YARN_OPTS% -Dhadoop.home.dir=%HADOOP_YARN_HOME%
+set YARN_OPTS=%YARN_OPTS% -Dhadoop.root.logger=%YARN_ROOT_LOGGER%
+set YARN_OPTS=%YARN_OPTS% -Dyarn.root.logger=%YARN_ROOT_LOGGER%
+if defined JAVA_LIBRARY_PATH (
+  set YARN_OPTS=%YARN_OPTS% -Djava.library.path=%JAVA_LIBRARY_PATH%
+)
+set YARN_OPTS=%YARN_OPTS% -Dyarn.policy.file=%YARN_POLICYFILE%
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/entrypoint.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/entrypoint.sh b/contrib/hawq-docker/centos6-docker/hawq-test/entrypoint.sh
new file mode 100755
index 0000000..2c03287
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/entrypoint.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+if [ -z "${NAMENODE}" ]; then
+  export NAMENODE=${HOSTNAME}
+fi
+
+if [ ! -f /etc/profile.d/hadoop.sh ]; then
+  echo '#!/bin/bash' | sudo tee /etc/profile.d/hadoop.sh
+  echo "export NAMENODE=${NAMENODE}" | sudo tee -a /etc/profile.d/hadoop.sh
+  sudo chmod a+x /etc/profile.d/hadoop.sh
+fi
+
+sudo start-hdfs.sh
+sudo sysctl -p
+sudo ln -s /usr/lib/libthrift-0.9.1.so /usr/lib64/libthrift-0.9.1.so
+
+exec "$@"
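
One way this entrypoint could be exercised (image tag, container names and network
setup are assumptions, not taken from the commit): the first container started
without NAMENODE treats itself as the namenode, and later containers point at it
through the variable the script persists into /etc/profile.d/hadoop.sh:

  $ docker run -itd --name namenode --hostname namenode hawq-test:centos6
  $ docker run -itd --name datanode1 --hostname datanode1 \
        -e NAMENODE=namenode hawq-test:centos6
  $ docker exec datanode1 cat /etc/profile.d/hadoop.sh   # should show: export NAMENODE=namenode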

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/start-hdfs.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/start-hdfs.sh b/contrib/hawq-docker/centos6-docker/hawq-test/start-hdfs.sh
new file mode 100755
index 0000000..076fb0a
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/start-hdfs.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+/etc/init.d/sshd start
+
+if [ -f /etc/profile.d/hadoop.sh ]; then
+  . /etc/profile.d/hadoop.sh
+fi
+
+if [ "${NAMENODE}" == "${HOSTNAME}" ]; then
+  if [ ! -d /tmp/hdfs/name/current ]; then
+    su -l hdfs -c "hdfs namenode -format"
+  fi
+  
+  if [ -z "`ps aux | grep org.apache.hadoop.hdfs.server.namenode.NameNode | grep -v grep`" ]; then
+    su -l hdfs -c "hadoop-daemon.sh start namenode"
+  fi
+else
+  if [ -z "`ps aux | grep org.apache.hadoop.hdfs.server.datanode.DataNode | grep -v grep`" ]; then
+    su -l hdfs -c "hadoop-daemon.sh start datanode"
+  fi
+fi
+
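
A quick check of what the script above started, assuming it has already run inside
the container:

  $ ps aux | grep org.apache.hadoop.hdfs.server.namenode.NameNode | grep -v grep
  $ su -l hdfs -c "hdfs dfsadmin -report"   # on the namenode: lists the registered datanodes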

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-dev/Dockerfile
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-dev/Dockerfile b/contrib/hawq-docker/centos7-docker/hawq-dev/Dockerfile
new file mode 100644
index 0000000..58d4ef0
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-dev/Dockerfile
@@ -0,0 +1,75 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+FROM centos:7
+
+MAINTAINER Richard Guo <ri...@pivotal.io>
+
+# install all software we need
+RUN yum install -y epel-release && \
+ yum makecache && \
+ yum install -y man passwd sudo tar which git mlocate links make bzip2 net-tools \
+ autoconf automake libtool m4 gcc gcc-c++ gdb bison flex cmake gperf maven indent \
+ libuuid-devel krb5-devel libgsasl-devel expat-devel libxml2-devel \
+ perl-ExtUtils-Embed pam-devel python-devel libcurl-devel snappy-devel \
+ thrift-devel libyaml-devel libevent-devel bzip2-devel openssl-devel \
+ openldap-devel protobuf-devel readline-devel net-snmp-devel apr-devel \
+ libesmtp-devel python-pip json-c-devel \
+ java-1.7.0-openjdk-devel lcov cmake \
+ openssh-clients openssh-server perl-JSON && \
+ yum clean all
+
+RUN pip --retries=50 --timeout=300 install pycrypto
+
+# OS requirement
+RUN echo "kernel.sem = 250 512000 100 2048" >> /etc/sysctl.conf
+
+# setup ssh server and keys for root
+RUN sshd-keygen && \
+ ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa && \
+ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys && \
+ chmod 0600 ~/.ssh/authorized_keys
+
+# create user gpadmin since HAWQ cannot run under root
+RUN groupadd -g 1000 gpadmin && \
+ useradd -u 1000 -g 1000 gpadmin && \
+ echo "gpadmin  ALL=(ALL)       NOPASSWD: ALL" > /etc/sudoers.d/gpadmin
+
+# sudo should not require tty
+RUN sed -i -e 's|Defaults    requiretty|#Defaults    requiretty|' /etc/sudoers
+
+# setup JAVA_HOME for all users
+RUN echo "#!/bin/sh" > /etc/profile.d/java.sh && \
+ echo "export JAVA_HOME=/etc/alternatives/java_sdk" >> /etc/profile.d/java.sh && \
+ chmod a+x /etc/profile.d/java.sh
+
+# set USER env
+RUN echo "#!/bin/bash" > /etc/profile.d/user.sh && \
+ echo "export USER=\`whoami\`" >> /etc/profile.d/user.sh && \
+ chmod a+x /etc/profile.d/user.sh
+
+ENV BASEDIR /data
+RUN mkdir -p /data && chmod 777 /data
+
+USER gpadmin
+
+# setup ssh client keys for gpadmin
+RUN ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa && \
+ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys && \
+ chmod 0600 ~/.ssh/authorized_keys
+
+WORKDIR /data

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-test/Dockerfile
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/Dockerfile b/contrib/hawq-docker/centos7-docker/hawq-test/Dockerfile
new file mode 100644
index 0000000..ea5e22c
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/Dockerfile
@@ -0,0 +1,40 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+FROM hawq/hawq-dev:centos7
+
+MAINTAINER Richard Guo <ri...@pivotal.io>
+
+USER root
+
+## install HDP 2.5.0
+RUN curl -L "http://public-repo-1.hortonworks.com/HDP/centos7/2.x/updates/2.5.0.0/hdp.repo" -o /etc/yum.repos.d/hdp.repo && \
+ yum install -y hadoop hadoop-hdfs hadoop-libhdfs hadoop-yarn hadoop-mapreduce hadoop-client hdp-select && \
+ yum clean all
+
+RUN ln -s /usr/hdp/current/hadoop-hdfs-namenode/../hadoop/sbin/hadoop-daemon.sh /usr/bin/hadoop-daemon.sh
+
+COPY conf/* /etc/hadoop/conf/
+
+COPY entrypoint.sh /usr/bin/entrypoint.sh
+COPY start-hdfs.sh /usr/bin/start-hdfs.sh
+
+USER gpadmin
+
+ENTRYPOINT ["entrypoint.sh"]
+CMD ["bash"]
+
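
The FROM line above implies the build order; a hedged sketch (only the
hawq/hawq-dev:centos7 tag appears in the Dockerfiles, the hawq-test tag is an
assumption):

  $ cd contrib/hawq-docker/centos7-docker/hawq-dev && docker build -t hawq/hawq-dev:centos7 .
  $ cd ../hawq-test && docker build -t hawq/hawq-test:centos7 .
  $ docker run -itd hawq/hawq-test:centos7   # with NAMENODE unset, the container runs a single-node HDFS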

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-test/conf/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/capacity-scheduler.xml b/contrib/hawq-docker/centos7-docker/hawq-test/conf/capacity-scheduler.xml
new file mode 100644
index 0000000..30f4eb9
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/capacity-scheduler.xml
@@ -0,0 +1,134 @@
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<configuration>
+
+  <property>
+    <name>yarn.scheduler.capacity.maximum-applications</name>
+    <value>10000</value>
+    <description>
+      Maximum number of applications that can be pending and running.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
+    <value>0.1</value>
+    <description>
+      Maximum percent of resources in the cluster that can be used to run
+      application masters, i.e. it controls the number of concurrently running
+      applications.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.resource-calculator</name>
+    <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
+    <description>
+      The ResourceCalculator implementation to be used to compare 
+      Resources in the scheduler.
+      The default i.e. DefaultResourceCalculator only uses Memory while
+      DominantResourceCalculator uses dominant-resource to compare 
+      multi-dimensional resources such as Memory, CPU etc.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.queues</name>
+    <value>default</value>
+    <description>
+      The queues at this level (root is the root queue).
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.capacity</name>
+    <value>100</value>
+    <description>Default queue target capacity.</description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
+    <value>1</value>
+    <description>
+      Default queue user limit, a percentage from 0.0 to 1.0.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
+    <value>100</value>
+    <description>
+      The maximum capacity of the default queue. 
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.state</name>
+    <value>RUNNING</value>
+    <description>
+      The state of the default queue. State can be one of RUNNING or STOPPED.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
+    <value>*</value>
+    <description>
+      The ACL of who can submit jobs to the default queue.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
+    <value>*</value>
+    <description>
+      The ACL of who can administer jobs on the default queue.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.node-locality-delay</name>
+    <value>40</value>
+    <description>
+      Number of missed scheduling opportunities after which the CapacityScheduler
+      attempts to schedule rack-local containers.
+      Typically this should be set to the number of nodes in the cluster. By default
+      it is set to approximately the number of nodes in one rack, which is 40.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.queue-mappings</name>
+    <value></value>
+    <description>
+      A list of mappings that will be used to assign jobs to queues.
+      The syntax for this list is [u|g]:[name]:[queue_name][,next mapping]*
+      Typically this list will be used to map users to queues,
+      for example, u:%user:%user maps all users to queues with the same name
+      as the user.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.queue-mappings-override.enable</name>
+    <value>false</value>
+    <description>
+      If a queue mapping is present, will it override the value specified
+      by the user? This can be used by administrators to place jobs in queues
+      that are different than the one specified by the user.
+      The default is false.
+    </description>
+  </property>
+
+</configuration>
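
Once the ResourceManager is running with this file, the resulting queue tree can be
verified through the RM REST API (the host name is an assumption; 8088 is the stock
web UI port):

  $ curl -s http://resourcemanager-host:8088/ws/v1/cluster/scheduler | python -m json.tool
  # expect a single "default" leaf queue with capacity 100 and state RUNNING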

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-test/conf/configuration.xsl
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/configuration.xsl b/contrib/hawq-docker/centos7-docker/hawq-test/conf/configuration.xsl
new file mode 100644
index 0000000..d50d80b
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/configuration.xsl
@@ -0,0 +1,40 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:output method="html"/>
+<xsl:template match="configuration">
+<html>
+<body>
+<table border="1">
+<tr>
+ <td>name</td>
+ <td>value</td>
+ <td>description</td>
+</tr>
+<xsl:for-each select="property">
+<tr>
+  <td><a name="{name}"><xsl:value-of select="name"/></a></td>
+  <td><xsl:value-of select="value"/></td>
+  <td><xsl:value-of select="description"/></td>
+</tr>
+</xsl:for-each>
+</table>
+</body>
+</html>
+</xsl:template>
+</xsl:stylesheet>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-test/conf/container-executor.cfg
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/container-executor.cfg b/contrib/hawq-docker/centos7-docker/hawq-test/conf/container-executor.cfg
new file mode 100644
index 0000000..d68cee8
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/container-executor.cfg
@@ -0,0 +1,4 @@
+yarn.nodemanager.linux-container-executor.group=#configured value of yarn.nodemanager.linux-container-executor.group
+banned.users=#comma separated list of users who can not run applications
+min.user.id=1000#Prevent other super-users
+allowed.system.users=##comma separated list of system users who CAN run applications

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-test/conf/core-site.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/core-site.xml b/contrib/hawq-docker/centos7-docker/hawq-test/conf/core-site.xml
new file mode 100644
index 0000000..afc37fc
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/core-site.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+	<property>
+		<name>fs.defaultFS</name>
+		<value>hdfs://${hdfs.namenode}:8020</value>
+	</property>
+</configuration>
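
The ${hdfs.namenode} placeholder is not expanded by Hadoop on its own; it is filled
from the JVM system property that hadoop-env.sh (later in this commit) sets via
HADOOP_OPTS="-Dhdfs.namenode=${NAMENODE}". A sketch to confirm the substitution
from inside a container:

  $ . /etc/profile.d/hadoop.sh            # sets NAMENODE (written by entrypoint.sh)
  $ hdfs getconf -confKey fs.defaultFS    # expected: hdfs://<namenode-hostname>:8020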

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-env.cmd
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-env.cmd b/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-env.cmd
new file mode 100644
index 0000000..bb40ec9
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-env.cmd
@@ -0,0 +1,92 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements.  See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License.  You may obtain a copy of the License at
+@rem
+@rem     http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem Set Hadoop-specific environment variables here.
+
+@rem The only required environment variable is JAVA_HOME.  All others are
+@rem optional.  When running a distributed configuration it is best to
+@rem set JAVA_HOME in this file, so that it is correctly defined on
+@rem remote nodes.
+
+@rem The java implementation to use.  Required.
+set JAVA_HOME=%JAVA_HOME%
+
+@rem The jsvc implementation to use. Jsvc is required to run secure datanodes.
+@rem set JSVC_HOME=%JSVC_HOME%
+
+@rem set HADOOP_CONF_DIR=
+
+@rem Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
+if exist %HADOOP_HOME%\contrib\capacity-scheduler (
+  if not defined HADOOP_CLASSPATH (
+    set HADOOP_CLASSPATH=%HADOOP_HOME%\contrib\capacity-scheduler\*.jar
+  ) else (
+    set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%HADOOP_HOME%\contrib\capacity-scheduler\*.jar
+  )
+)
+
+@rem If TEZ_CLASSPATH is defined in the env, that means that TEZ is enabled
+@rem append it to the HADOOP_CLASSPATH
+
+if defined TEZ_CLASSPATH (
+  if not defined HADOOP_CLASSPATH (
+    set HADOOP_CLASSPATH=%TEZ_CLASSPATH%
+  ) else (
+    set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%TEZ_CLASSPATH%
+  )
+)
+
+@rem The maximum amount of heap to use, in MB. Default is 1000.
+@rem set HADOOP_HEAPSIZE=
+@rem set HADOOP_NAMENODE_INIT_HEAPSIZE=""
+
+@rem Extra Java runtime options.  Empty by default.
+@rem set HADOOP_OPTS=%HADOOP_OPTS% -Djava.net.preferIPv4Stack=true
+
+@rem Command specific options appended to HADOOP_OPTS when specified
+if not defined HADOOP_SECURITY_LOGGER (
+  set HADOOP_SECURITY_LOGGER=INFO,RFAS
+)
+if not defined HDFS_AUDIT_LOGGER (
+  set HDFS_AUDIT_LOGGER=INFO,NullAppender
+)
+
+set HADOOP_NAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_NAMENODE_OPTS%
+set HADOOP_DATANODE_OPTS=-Dhadoop.security.logger=ERROR,RFAS %HADOOP_DATANODE_OPTS%
+set HADOOP_SECONDARYNAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_SECONDARYNAMENODE_OPTS%
+
+@rem The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+set HADOOP_CLIENT_OPTS=-Xmx512m %HADOOP_CLIENT_OPTS%
+@rem set HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData %HADOOP_JAVA_PLATFORM_OPTS%"
+
+@rem On secure datanodes, user to run the datanode as after dropping privileges
+set HADOOP_SECURE_DN_USER=%HADOOP_SECURE_DN_USER%
+
+@rem Where log files are stored.  %HADOOP_HOME%/logs by default.
+@rem set HADOOP_LOG_DIR=%HADOOP_LOG_DIR%\%USERNAME%
+
+@rem Where log files are stored in the secure data environment.
+set HADOOP_SECURE_DN_LOG_DIR=%HADOOP_LOG_DIR%\%HADOOP_HDFS_USER%
+
+@rem The directory where pid files are stored. /tmp by default.
+@rem NOTE: this should be set to a directory that can only be written to by 
+@rem       the user that will run the hadoop daemons.  Otherwise there is the
+@rem       potential for a symlink attack.
+set HADOOP_PID_DIR=%HADOOP_PID_DIR%
+set HADOOP_SECURE_DN_PID_DIR=%HADOOP_PID_DIR%
+
+@rem A string representing this instance of hadoop. %USERNAME% by default.
+set HADOOP_IDENT_STRING=%USERNAME%

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-env.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-env.sh b/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-env.sh
new file mode 100644
index 0000000..95511ed
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-env.sh
@@ -0,0 +1,110 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.
+export JAVA_HOME=/etc/alternatives/java_sdk
+
+# The jsvc implementation to use. Jsvc is required to run secure datanodes
+# that bind to privileged ports to provide authentication of data transfer
+# protocol.  Jsvc is not required if SASL is configured for authentication of
+# data transfer protocol using non-privileged ports.
+#export JSVC_HOME=${JSVC_HOME}
+
+#export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
+
+# Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
+#for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
+#  if [ "$HADOOP_CLASSPATH" ]; then
+#    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
+#  else
+#    export HADOOP_CLASSPATH=$f
+#  fi
+#done
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+#export HADOOP_HEAPSIZE=
+#export HADOOP_NAMENODE_INIT_HEAPSIZE=""
+
+# Setup environment variable for docker image
+if [ -f /etc/profile.d/hadoop.sh ]; then
+  . /etc/profile.d/hadoop.sh
+fi
+
+if [ -z "${NAMENODE}" ]; then
+  echo "environment variable NAMENODE is not set!"
+  exit 1
+fi
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Dhdfs.namenode=${NAMENODE}"
+#export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
+
+# Command specific options appended to HADOOP_OPTS when specified
+#export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
+#export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
+
+#export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
+
+#export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
+#export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+#export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
+#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
+
+# On secure datanodes, user to run the datanode as after dropping privileges.
+# This **MUST** be uncommented to enable secure HDFS if using privileged ports
+# to provide authentication of data transfer protocol.  This **MUST NOT** be
+# defined if SASL is configured for authentication of data transfer protocol
+# using non-privileged ports.
+#export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR=/var/log/hadoop
+export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
+
+# Where log files are stored in the secure data environment.
+#export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
+
+###
+# HDFS Mover specific parameters
+###
+# Specify the JVM options to be used when starting the HDFS Mover.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HADOOP_MOVER_OPTS=""
+
+###
+# Advanced Users Only!
+###
+
+# The directory where pid files are stored. /tmp by default.
+# NOTE: this should be set to a directory that can only be written to by
+#       the user that will run the hadoop daemons.  Otherwise there is the
+#       potential for a symlink attack.
+#export HADOOP_PID_DIR=${HADOOP_PID_DIR}
+#export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
+
+# A string representing this instance of hadoop. $USER by default.
+#export HADOOP_IDENT_STRING=$USER

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-metrics.properties
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-metrics.properties b/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-metrics.properties
new file mode 100644
index 0000000..c1b2eb7
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-metrics.properties
@@ -0,0 +1,75 @@
+# Configuration of the "dfs" context for null
+dfs.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "dfs" context for file
+#dfs.class=org.apache.hadoop.metrics.file.FileContext
+#dfs.period=10
+#dfs.fileName=/tmp/dfsmetrics.log
+
+# Configuration of the "dfs" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# dfs.period=10
+# dfs.servers=localhost:8649
+
+
+# Configuration of the "mapred" context for null
+mapred.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "mapred" context for file
+#mapred.class=org.apache.hadoop.metrics.file.FileContext
+#mapred.period=10
+#mapred.fileName=/tmp/mrmetrics.log
+
+# Configuration of the "mapred" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# mapred.period=10
+# mapred.servers=localhost:8649
+
+
+# Configuration of the "jvm" context for null
+#jvm.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "jvm" context for file
+#jvm.class=org.apache.hadoop.metrics.file.FileContext
+#jvm.period=10
+#jvm.fileName=/tmp/jvmmetrics.log
+
+# Configuration of the "jvm" context for ganglia
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# jvm.period=10
+# jvm.servers=localhost:8649
+
+# Configuration of the "rpc" context for null
+rpc.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "rpc" context for file
+#rpc.class=org.apache.hadoop.metrics.file.FileContext
+#rpc.period=10
+#rpc.fileName=/tmp/rpcmetrics.log
+
+# Configuration of the "rpc" context for ganglia
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# rpc.period=10
+# rpc.servers=localhost:8649
+
+
+# Configuration of the "ugi" context for null
+ugi.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "ugi" context for file
+#ugi.class=org.apache.hadoop.metrics.file.FileContext
+#ugi.period=10
+#ugi.fileName=/tmp/ugimetrics.log
+
+# Configuration of the "ugi" context for ganglia
+# ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# ugi.period=10
+# ugi.servers=localhost:8649
+

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-metrics2.properties
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-metrics2.properties b/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-metrics2.properties
new file mode 100644
index 0000000..0c09228
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-metrics2.properties
@@ -0,0 +1,68 @@
+# syntax: [prefix].[source|sink].[instance].[options]
+# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
+
+*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
+# default sampling period, in seconds
+*.period=10
+
+# The namenode-metrics.out will contain metrics from all context
+#namenode.sink.file.filename=namenode-metrics.out
+# Specifying a special sampling period for namenode:
+#namenode.sink.*.period=8
+
+#datanode.sink.file.filename=datanode-metrics.out
+
+#resourcemanager.sink.file.filename=resourcemanager-metrics.out
+
+#nodemanager.sink.file.filename=nodemanager-metrics.out
+
+#mrappmaster.sink.file.filename=mrappmaster-metrics.out
+
+#jobhistoryserver.sink.file.filename=jobhistoryserver-metrics.out
+
+# the following example split metrics of different
+# context to different sinks (in this case files)
+#nodemanager.sink.file_jvm.class=org.apache.hadoop.metrics2.sink.FileSink
+#nodemanager.sink.file_jvm.context=jvm
+#nodemanager.sink.file_jvm.filename=nodemanager-jvm-metrics.out
+#nodemanager.sink.file_mapred.class=org.apache.hadoop.metrics2.sink.FileSink
+#nodemanager.sink.file_mapred.context=mapred
+#nodemanager.sink.file_mapred.filename=nodemanager-mapred-metrics.out
+
+#
+# Below are for sending metrics to Ganglia
+#
+# for Ganglia 3.0 support
+# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink30
+#
+# for Ganglia 3.1 support
+# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+
+# *.sink.ganglia.period=10
+
+# default for supportsparse is false
+# *.sink.ganglia.supportsparse=true
+
+#*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+#*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Tag values to use for the ganglia prefix. If not defined no tags are used.
+# If '*' all tags are used. If specifying multiple tags, separate them with
+# commas. Note that the last segment of the property name is the context name.
+#
+#*.sink.ganglia.tagsForPrefix.jvm=ProcessName
+#*.sink.ganglia.tagsForPrefix.dfs=
+#*.sink.ganglia.tagsForPrefix.rpc=
+#*.sink.ganglia.tagsForPrefix.mapred=
+
+#namenode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#datanode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#resourcemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#nodemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#mrappmaster.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#jobhistoryserver.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-policy.xml b/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-policy.xml
new file mode 100644
index 0000000..2bf5c02
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-policy.xml
@@ -0,0 +1,226 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ 
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientProtocol, which is used by user code
+    via the DistributedFileSystem.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.client.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+    for block recovery.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for DatanodeProtocol, which is used by datanodes to
+    communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.inter.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
+    for updating generation timestamp.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.namenode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for NamenodeProtocol, the protocol used by the secondary
+    namenode to communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+ <property>
+    <name>security.admin.operations.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for AdminOperationsProtocol. Used for admin commands.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.refresh.user.mappings.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
+    users mappings. The ACL is a comma-separated list of user and
+    group names. The user and group list is separated by a blank. For
+    e.g. "alice,bob users,wheel".  A special value of "*" means all
+    users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.refresh.policy.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
+    dfsadmin and mradmin commands to refresh the security policy in-effect.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.ha.service.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HAService protocol used by HAAdmin to manage the
+      active and stand-by states of namenode.</description>
+  </property>
+
+  <property>
+    <name>security.zkfc.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for access to the ZK Failover Controller
+    </description>
+  </property>
+
+  <property>
+    <name>security.qjournal.service.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for QJournalProtocol, used by the NN to communicate with
+    JNs when using the QuorumJournalManager for edit logs.</description>
+  </property>
+
+  <property>
+    <name>security.mrhs.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HSClientProtocol, used by job clients to
+    communicate with the MR History Server to query job status etc.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <!-- YARN Protocols -->
+
+  <property>
+    <name>security.resourcetracker.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ResourceTrackerProtocol, used by the
+    ResourceManager and NodeManager to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.resourcemanager-administration.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ResourceManagerAdministrationProtocol, for admin commands. 
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.applicationclient.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ApplicationClientProtocol, used by the ResourceManager 
+    and applications submission clients to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.applicationmaster.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ApplicationMasterProtocol, used by the ResourceManager 
+    and ApplicationMasters to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.containermanagement.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ContainerManagementProtocol protocol, used by the NodeManager 
+    and ApplicationMasters to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.resourcelocalizer.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ResourceLocalizer protocol, used by the NodeManager 
+    and ResourceLocalizer to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.task.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
+    tasks to communicate with the parent tasktracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for MRClientProtocol, used by job clients to
+    communicate with the MR ApplicationMaster to query job status etc.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.applicationhistory.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ApplicationHistoryProtocol, used by the timeline
+    server and the generic history service client to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+</configuration>
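
These ACLs are only consulted when hadoop.security.authorization is enabled in
core-site.xml, and the wide-open "*" values above effectively allow everyone. After
editing this file on a running cluster, the policies can be reloaded without a
restart using the standard admin commands:

  $ hdfs dfsadmin -refreshServiceAcl
  $ yarn rmadmin -refreshServiceAcls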

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-test/conf/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/hdfs-site.xml b/contrib/hawq-docker/centos7-docker/hawq-test/conf/hdfs-site.xml
new file mode 100644
index 0000000..3f4f152
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/hdfs-site.xml
@@ -0,0 +1,100 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+	<property>
+		<name>dfs.name.dir</name>
+		<value>/tmp/hdfs/name</value>
+		<final>true</final>
+	</property>
+
+	<property>
+		<name>dfs.data.dir</name>
+		<value>/tmp/hdfs/data</value>
+		<final>true</final>
+	</property>
+
+	<property>
+		<name>dfs.permissions</name>
+		<value>true</value>
+	</property>
+
+	<property>
+		<name>dfs.support.append</name>
+		<value>true</value>
+	</property>
+
+	<property>
+		<name>dfs.block.local-path-access.user</name>
+		<value>${user.name}</value>
+	</property>
+
+	<property>
+		<name>dfs.replication</name>
+		<value>3</value>
+	</property>
+
+	<property>
+		<name>dfs.datanode.socket.write.timeout</name>
+		<value>0</value>
+		<description>
+			used for sockets to and from datanodes. It is 8 minutes by default. Some
+			users set this to 0, effectively disabling the write timeout.
+		</description>
+	</property>
+
+	<property>
+		<name>dfs.webhdfs.enabled</name>
+		<value>true</value>
+	</property>
+
+	<property>
+		<name>dfs.allow.truncate</name>
+		<value>true</value>
+	</property>
+
+	<property>
+		<name>dfs.namenode.fs-limits.min-block-size</name>
+		<value>1024</value>
+	</property>
+
+	<property>
+		<name>dfs.client.read.shortcircuit</name>
+		<value>true</value>
+	</property>
+
+	<property>
+		<name>dfs.domain.socket.path</name>
+		<value>/var/lib/hadoop-hdfs/dn_socket</value>
+	</property>
+
+	<property>
+		<name>dfs.block.access.token.enable</name>
+		<value>true</value>
+		<description>
+			If "true", access tokens are used as capabilities for accessing
+			datanodes.
+			If "false", no access tokens are checked on accessing datanodes.
+		</description>
+	</property>
+	
+	<property>
+		<name>dfs.namenode.datanode.registration.ip-hostname-check</name>
+		<value>false</value>
+	</property>
+</configuration>
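
As a quick sanity check, the settings above can be read back through Hadoop's standard Configuration API. The sketch below is an illustration only; the file location inside the docker test image is an assumption, not something stated in this commit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class HdfsSiteCheck {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Assumed location of the hdfs-site.xml added by this commit inside the test image.
        conf.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"));
        System.out.println("short-circuit reads: " + conf.getBoolean("dfs.client.read.shortcircuit", false));
        System.out.println("block access tokens: " + conf.getBoolean("dfs.block.access.token.enable", false));
        System.out.println("replication:         " + conf.getInt("dfs.replication", 3));
    }
}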

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-acls.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-acls.xml b/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-acls.xml
new file mode 100644
index 0000000..cba69f4
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-acls.xml
@@ -0,0 +1,135 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+
+  <!-- This file is hot-reloaded when it changes -->
+
+  <!-- KMS ACLs -->
+
+  <property>
+    <name>hadoop.kms.acl.CREATE</name>
+    <value>*</value>
+    <description>
+      ACL for create-key operations.
+      If the user is not in the GET ACL, the key material is not returned
+      as part of the response.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.DELETE</name>
+    <value>*</value>
+    <description>
+      ACL for delete-key operations.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.ROLLOVER</name>
+    <value>*</value>
+    <description>
+      ACL for rollover-key operations.
+      If the user is not in the GET ACL, the key material is not returned
+      as part of the response.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.GET</name>
+    <value>*</value>
+    <description>
+      ACL for get-key-version and get-current-key operations.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.GET_KEYS</name>
+    <value>*</value>
+    <description>
+      ACL for get-keys operations.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.GET_METADATA</name>
+    <value>*</value>
+    <description>
+      ACL for get-key-metadata and get-keys-metadata operations.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.SET_KEY_MATERIAL</name>
+    <value>*</value>
+    <description>
+      Complementary ACL for CREATE and ROLLOVER operations to allow the client
+      to provide the key material when creating or rolling a key.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.GENERATE_EEK</name>
+    <value>*</value>
+    <description>
+      ACL for generateEncryptedKey CryptoExtension operations.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.DECRYPT_EEK</name>
+    <value>*</value>
+    <description>
+      ACL for decryptEncryptedKey CryptoExtension operations.
+    </description>
+  </property>
+
+  <property>
+    <name>default.key.acl.MANAGEMENT</name>
+    <value>*</value>
+    <description>
+      default ACL for MANAGEMENT operations for all key acls that are not
+      explicitly defined.
+    </description>
+  </property>
+
+  <property>
+    <name>default.key.acl.GENERATE_EEK</name>
+    <value>*</value>
+    <description>
+      default ACL for GENERATE_EEK operations for all key acls that are not
+      explicitly defined.
+    </description>
+  </property>
+
+  <property>
+    <name>default.key.acl.DECRYPT_EEK</name>
+    <value>*</value>
+    <description>
+      default ACL for DECRYPT_EEK operations for all key acls that are not
+      explicitly defined.
+    </description>
+  </property>
+
+  <property>
+    <name>default.key.acl.READ</name>
+    <value>*</value>
+    <description>
+      default ACL for READ operations for all key acls that are not
+      explicitly defined.
+    </description>
+  </property>
+
+
+</configuration>
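
For reference, the operations these ACLs guard correspond to calls on Hadoop's KeyProvider client API. The sketch below is illustrative only; the KMS endpoint URI and key name are assumptions, not values taken from this commit.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderFactory;

public class KmsAclDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumed KMS address; adjust to the test cluster.
        KeyProvider provider =
                KeyProviderFactory.get(new URI("kms://http@localhost:16000/kms"), conf);

        KeyProvider.Options opts = new KeyProvider.Options(conf);
        opts.setBitLength(128);
        provider.createKey("demo-key", opts);     // governed by hadoop.kms.acl.CREATE
        System.out.println(provider.getKeys());   // governed by hadoop.kms.acl.GET_KEYS
        provider.deleteKey("demo-key");           // governed by hadoop.kms.acl.DELETE
    }
}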

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-env.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-env.sh b/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-env.sh
new file mode 100644
index 0000000..44dfe6a
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-env.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License. See accompanying LICENSE file.
+#
+
+# Set kms specific environment variables here.
+
+# Settings for the Embedded Tomcat that runs KMS
+# Java System properties for KMS should be specified in this variable
+#
+# export CATALINA_OPTS=
+
+# KMS logs directory
+#
+# export KMS_LOG=${KMS_HOME}/logs
+
+# KMS temporary directory
+#
+# export KMS_TEMP=${KMS_HOME}/temp
+
+# The HTTP port used by KMS
+#
+# export KMS_HTTP_PORT=16000
+
+# The Admin port used by KMS
+#
+# export KMS_ADMIN_PORT=`expr ${KMS_HTTP_PORT} + 1`
+
+# The maximum number of Tomcat handler threads
+#
+# export KMS_MAX_THREADS=1000
+
+# The location of the SSL keystore if using SSL
+#
+# export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore
+
+# The password of the SSL keystore if using SSL
+#
+# export KMS_SSL_KEYSTORE_PASS=password
+
+# The full path to any native libraries that need to be loaded
+# (e.g. the location of natively compiled Tomcat Apache Portable
+# Runtime (APR) libraries)
+#
+# export JAVA_LIBRARY_PATH=${HOME}/lib/native

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-log4j.properties
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-log4j.properties b/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-log4j.properties
new file mode 100644
index 0000000..8e6d909
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-log4j.properties
@@ -0,0 +1,38 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. See accompanying LICENSE file.
+#
+
+# If the Java System property 'kms.log.dir' is not defined at KMS start up time
+# Setup sets its value to '${kms.home}/logs'
+
+log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.kms.DatePattern='.'yyyy-MM-dd
+log4j.appender.kms.File=${kms.log.dir}/kms.log
+log4j.appender.kms.Append=true
+log4j.appender.kms.layout=org.apache.log4j.PatternLayout
+log4j.appender.kms.layout.ConversionPattern=%d{ISO8601} %-5p %c{1} - %m%n
+
+log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.kms-audit.DatePattern='.'yyyy-MM-dd
+log4j.appender.kms-audit.File=${kms.log.dir}/kms-audit.log
+log4j.appender.kms-audit.Append=true
+log4j.appender.kms-audit.layout=org.apache.log4j.PatternLayout
+log4j.appender.kms-audit.layout.ConversionPattern=%d{ISO8601} %m%n
+
+log4j.logger.kms-audit=INFO, kms-audit
+log4j.additivity.kms-audit=false
+
+log4j.rootLogger=ALL, kms
+log4j.logger.org.apache.hadoop.conf=ERROR
+log4j.logger.org.apache.hadoop=INFO
+log4j.logger.com.sun.jersey.server.wadl.generators.WadlGeneratorJAXBGrammarGenerator=OFF
\ No newline at end of file


[39/50] [abbrv] incubator-hawq git commit: HAWQ-1285. resource manager outputs uninitialized string as host name

Posted by es...@apache.org.
HAWQ-1285. resource manager outputs uninitialized string as host name


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/d2608dec
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/d2608dec
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/d2608dec

Branch: refs/heads/2.1.0.0-incubating
Commit: d2608dec4df52297b883e16893f2fda75242264c
Parents: 5da0476
Author: Yi <yj...@pivotal.io>
Authored: Mon Jan 23 15:25:39 2017 +1100
Committer: Yi <yj...@pivotal.io>
Committed: Mon Jan 23 15:25:39 2017 +1100

----------------------------------------------------------------------
 src/backend/resourcemanager/resourcepool.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/d2608dec/src/backend/resourcemanager/resourcepool.c
----------------------------------------------------------------------
diff --git a/src/backend/resourcemanager/resourcepool.c b/src/backend/resourcemanager/resourcepool.c
index 755a4ab..db52a54 100644
--- a/src/backend/resourcemanager/resourcepool.c
+++ b/src/backend/resourcemanager/resourcepool.c
@@ -4697,10 +4697,10 @@ void adjustSegmentStatGRMCapacity(SegStat segstat)
 		if ( oldmemorymb != segstat->GRMTotalMemoryMB ||
 			 oldcore	 != segstat->GRMTotalCore )
 		{
-			elog(LOG, "Resource manager adjusts segment %s original global resource "
-					  "manager resource capacity from (%d MB, %d CORE) to "
-					  "(%d MB, %d CORE)",
-					  GET_SEGINFO_HOSTNAME(&(segstat->Info)),
+			elog(LOG, "resource manager adjusts segment %s resource capacity "
+					  "from (%d MB, %d CORE) to (%d MB, %d CORE) from the "
+					  "cluster report of global resource manager",
+					  GET_SEGINFO_GRMHOSTNAME(&(segstat->Info)),
 					  oldmemorymb,
 					  oldcore,
 					  segstat->GRMTotalMemoryMB,


[04/50] [abbrv] incubator-hawq git commit: HAWQ-1240. Fix bug of plan refinement for cursor operation

Posted by es...@apache.org.
HAWQ-1240. Fix bug of plan refinement for cursor operation


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/e25fe8b4
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/e25fe8b4
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/e25fe8b4

Branch: refs/heads/2.1.0.0-incubating
Commit: e25fe8b4243594809bd673b46318968169f700e4
Parents: cee573a
Author: Paul Guo <pa...@gmail.com>
Authored: Wed Dec 28 10:52:39 2016 +0800
Committer: Paul Guo <pa...@gmail.com>
Committed: Thu Dec 29 14:14:55 2016 +0800

----------------------------------------------------------------------
 src/backend/executor/spi.c | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/e25fe8b4/src/backend/executor/spi.c
----------------------------------------------------------------------
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index c7b7f92..9b6ae9f 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -1145,13 +1145,13 @@ SPI_cursor_open(const char *name, SPIPlanPtr plan,
 	/* Switch to portal's memory and copy the parsetrees and plans to there */
 	oldcontext = MemoryContextSwitchTo(PortalGetHeapMemory(portal));
 	qtlist = copyObject(qtlist);
-	ptlist = copyObject(ptlist);
 
 	Query *queryTree = (Query *) linitial(qtlist);
-	queryTree = copyObject(queryTree);
+	PlannedStmt* stmt = (PlannedStmt *)linitial(ptlist);
 
-	PlannedStmt* stmt = (PlannedStmt*)linitial(ptlist);
-	stmt = refineCachedPlan(stmt, queryTree, 0 ,NULL);
+	PlannedStmt* new_stmt = refineCachedPlan(stmt, queryTree, 0, NULL);
+	if (new_stmt == stmt)
+		new_stmt = copyObject(new_stmt);
 
 	/* If the plan has parameters, set them up */
 	if (spiplan->nargs > 0)
@@ -1199,7 +1199,7 @@ SPI_cursor_open(const char *name, SPIPlanPtr plan,
 					  query_string,
 					  T_SelectStmt,
 					  CreateCommandTag(PortalListGetPrimaryStmt(qtlist)),
-					  ptlist,
+					  list_make1(new_stmt),
 					  PortalGetHeapMemory(portal));
 
 	create_filesystem_credentials(portal);
@@ -1213,13 +1213,10 @@ SPI_cursor_open(const char *name, SPIPlanPtr plan,
 	{
 		int option = CURSOR_OPT_NO_SCROLL;
 		
-		if ( list_length(ptlist) == 1 )
-		{
-			PlannedStmt *stmt = (PlannedStmt *)linitial(ptlist);
-			if ( stmt && stmt->planTree && 
-				ExecSupportsBackwardScan(stmt->planTree) )
-				option = CURSOR_OPT_SCROLL;
-		}
+		if ( new_stmt && new_stmt->planTree &&
+			ExecSupportsBackwardScan(new_stmt->planTree) )
+			option = CURSOR_OPT_SCROLL;
+
 		portal->cursorOptions |= option;
 	}
 


[44/50] [abbrv] incubator-hawq git commit: HAWQ-1228. Use profile based on file format in HCatalog integration(HiveRC, HiveText profiles).

Posted by es...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-hive/src/test/java/org/apache/hawq/pxf/plugins/hive/utilities/HiveUtilitiesTest.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-hive/src/test/java/org/apache/hawq/pxf/plugins/hive/utilities/HiveUtilitiesTest.java b/pxf/pxf-hive/src/test/java/org/apache/hawq/pxf/plugins/hive/utilities/HiveUtilitiesTest.java
index b736bba..05ef001 100644
--- a/pxf/pxf-hive/src/test/java/org/apache/hawq/pxf/plugins/hive/utilities/HiveUtilitiesTest.java
+++ b/pxf/pxf-hive/src/test/java/org/apache/hawq/pxf/plugins/hive/utilities/HiveUtilitiesTest.java
@@ -23,10 +23,18 @@ package org.apache.hawq.pxf.plugins.hive.utilities;
 import static org.junit.Assert.*;
 
 import java.util.Arrays;
+import java.util.Collections;
 
 import com.google.common.base.Joiner;
+
 import org.apache.hawq.pxf.api.io.DataType;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.ql.io.orc.OrcSerde;
+import org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe;
+import org.apache.hadoop.hive.serde2.*;
 import org.junit.Test;
 import org.apache.hawq.pxf.api.Metadata;
 import org.apache.hawq.pxf.api.UnsupportedTypeException;
@@ -399,4 +407,49 @@ public class HiveUtilitiesTest {
             assertEquals(errorMsg, e.getMessage());
         }
     }
+
+    @Test
+    public void createDeserializer() throws Exception {
+        SerDe serde = HiveUtilities.createDeserializer(HiveUtilities.PXF_HIVE_SERDES.ORC_SERDE, HiveUtilities.PXF_HIVE_SERDES.ORC_SERDE);
+        assertTrue(serde instanceof OrcSerde);
+
+        serde = HiveUtilities.createDeserializer(HiveUtilities.PXF_HIVE_SERDES.LAZY_BINARY_COLUMNAR_SERDE, HiveUtilities.PXF_HIVE_SERDES.LAZY_BINARY_COLUMNAR_SERDE);
+        assertTrue(serde instanceof org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe);
+
+        serde = HiveUtilities.createDeserializer(HiveUtilities.PXF_HIVE_SERDES.COLUMNAR_SERDE, HiveUtilities.PXF_HIVE_SERDES.COLUMNAR_SERDE);
+        assertTrue(serde instanceof org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe);
+
+        try {
+            serde = HiveUtilities.createDeserializer(HiveUtilities.PXF_HIVE_SERDES.COLUMNAR_SERDE, HiveUtilities.PXF_HIVE_SERDES.ORC_SERDE);
+            fail("shouldn't be able to create a deserializer with a disallowed serde");
+        } catch (UnsupportedTypeException e) {
+            assertTrue(e.getMessage().equals("Unsupported Hive Serde: " + HiveUtilities.PXF_HIVE_SERDES.COLUMNAR_SERDE.name()));
+        }
+    }
+
+    @Test
+    public void getDelimiterCode() {
+
+        //Default delimiter code should be 44(comma)
+        Integer delimiterCode = HiveUtilities.getDelimiterCode(null);
+        char defaultDelim = ',';
+        assertTrue(delimiterCode == (int) defaultDelim);
+
+        //Some serdes use FIELD_DELIM key
+        char expectedDelim = '%';
+        StorageDescriptor sd = new StorageDescriptor();
+        SerDeInfo si = new SerDeInfo();
+        si.setParameters(Collections.singletonMap(serdeConstants.FIELD_DELIM, String.valueOf(expectedDelim)));
+        sd.setSerdeInfo(si);
+        delimiterCode = HiveUtilities.getDelimiterCode(sd);
+        assertTrue(delimiterCode == (int) expectedDelim);
+
+        //Some serdes use SERIALIZATION_FORMAT key
+        sd = new StorageDescriptor();
+        si = new SerDeInfo();
+        si.setParameters(Collections.singletonMap(serdeConstants.SERIALIZATION_FORMAT, String.valueOf((int)expectedDelim)));
+        sd.setSerdeInfo(si);
+        delimiterCode = HiveUtilities.getDelimiterCode(sd);
+        assertTrue(delimiterCode == (int) expectedDelim);
+    }
 }

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-hive/src/test/java/org/apache/hawq/pxf/plugins/hive/utilities/ProfileFactoryTest.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-hive/src/test/java/org/apache/hawq/pxf/plugins/hive/utilities/ProfileFactoryTest.java b/pxf/pxf-hive/src/test/java/org/apache/hawq/pxf/plugins/hive/utilities/ProfileFactoryTest.java
new file mode 100644
index 0000000..d588d35
--- /dev/null
+++ b/pxf/pxf-hive/src/test/java/org/apache/hawq/pxf/plugins/hive/utilities/ProfileFactoryTest.java
@@ -0,0 +1,65 @@
+package org.apache.hawq.pxf.plugins.hive.utilities;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
+import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
+import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.SequenceFileInputFilter;
+import org.apache.hadoop.mapred.TextInputFormat;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class ProfileFactoryTest {
+
+    @Test
+    public void get() throws Exception {
+
+        // For TextInputFormat when table has no complex types, HiveText profile should be used
+        String profileName = ProfileFactory.get(new TextInputFormat(), false);
+        assertEquals("HiveText", profileName);
+
+        // For TextInputFormat when table has complex types, Hive profile should be used; HiveText doesn't support complex types yet
+        profileName = ProfileFactory.get(new TextInputFormat(), true);
+        assertEquals("Hive", profileName);
+
+        // For RCFileInputFormat when table has complex types, HiveRC profile should be used
+        profileName = ProfileFactory.get(new RCFileInputFormat(), true);
+        assertEquals("HiveRC", profileName);
+
+        // For RCFileInputFormat when table has no complex types, HiveRC profile should be used
+        profileName = ProfileFactory.get(new RCFileInputFormat(), false);
+        assertEquals("HiveRC", profileName);
+
+        // For OrcInputFormat when table has complex types, HiveORC profile should be used
+        profileName = ProfileFactory.get(new OrcInputFormat(), true);
+        assertEquals("HiveORC", profileName);
+
+        // For OrcInputFormat when table has no complex types, HiveORC profile should be used
+        profileName = ProfileFactory.get(new OrcInputFormat(), false);
+        assertEquals("HiveORC", profileName);
+
+        // For other formats Hive profile should be used
+        profileName = ProfileFactory.get(new SequenceFileInputFilter(), false);
+        assertEquals("Hive", profileName);
+    }
+
+}
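
The relocated two-argument ProfileFactory is not included in this excerpt; the selection rules the test asserts can be summarised in the following sketch (reconstructed for illustration, not copied from the commit).

import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.TextInputFormat;

public final class ProfileFactorySketch {
    public static String get(InputFormat inputFormat, boolean hasComplexTypes) {
        if (inputFormat instanceof TextInputFormat && !hasComplexTypes) {
            return "HiveText";   // plain text tables without complex types
        } else if (inputFormat instanceof RCFileInputFormat) {
            return "HiveRC";     // RC files, with or without complex types
        } else if (inputFormat instanceof OrcInputFormat) {
            return "HiveORC";    // ORC files, with or without complex types
        }
        return "Hive";           // everything else falls back to the general-purpose profile
    }
}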

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/BridgeOutputBuilder.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/BridgeOutputBuilder.java b/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/BridgeOutputBuilder.java
index d04a2f4..1c199d3 100644
--- a/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/BridgeOutputBuilder.java
+++ b/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/BridgeOutputBuilder.java
@@ -92,7 +92,7 @@ public class BridgeOutputBuilder {
     void makeErrorRecord() {
         int[] errSchema = { TEXT.getOID() };
 
-        if (inputData.outputFormat() != OutputFormat.BINARY) {
+        if (inputData.outputFormat() != OutputFormat.GPDBWritable) {
             return;
         }
 
@@ -109,7 +109,7 @@ public class BridgeOutputBuilder {
      * @throws Exception if the output format is not binary
      */
     public Writable getErrorOutput(Exception ex) throws Exception {
-        if (inputData.outputFormat() == OutputFormat.BINARY) {
+        if (inputData.outputFormat() == OutputFormat.GPDBWritable) {
             errorRecord.setString(0, ex.getMessage());
             return errorRecord;
         } else {
@@ -126,7 +126,7 @@ public class BridgeOutputBuilder {
      */
     public LinkedList<Writable> makeOutput(List<OneField> recFields)
             throws BadRecordException {
-        if (output == null && inputData.outputFormat() == OutputFormat.BINARY) {
+        if (output == null && inputData.outputFormat() == OutputFormat.GPDBWritable) {
             makeGPDBWritableOutput();
         }
 
@@ -174,7 +174,7 @@ public class BridgeOutputBuilder {
      * @throws BadRecordException if building the output record failed
      */
     void fillOutputRecord(List<OneField> recFields) throws BadRecordException {
-        if (inputData.outputFormat() == OutputFormat.BINARY) {
+        if (inputData.outputFormat() == OutputFormat.GPDBWritable) {
             fillGPDBWritable(recFields);
         } else {
             fillText(recFields);

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/MetadataResponseFormatter.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/MetadataResponseFormatter.java b/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/MetadataResponseFormatter.java
index 8225ec5..d2b0b5c 100644
--- a/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/MetadataResponseFormatter.java
+++ b/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/MetadataResponseFormatter.java
@@ -86,7 +86,8 @@ public class MetadataResponseFormatter {
                     result.append("Field #").append(++i).append(": [")
                             .append("Name: ").append(field.getName())
                             .append(", Type: ").append(field.getType().getTypeName())
-                            .append(", Source type: ").append(field.getSourceType()).append("] ");
+                            .append(", Source type: ").append(field.getSourceType())
+                            .append(", Source type is complex: ").append(field.isComplexType()).append("] ");
                 }
             }
             LOG.debug(result);

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/ProfileFactory.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/ProfileFactory.java b/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/ProfileFactory.java
deleted file mode 100644
index fc5ed0f..0000000
--- a/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/ProfileFactory.java
+++ /dev/null
@@ -1,45 +0,0 @@
-package org.apache.hawq.pxf.service;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
-import org.apache.hadoop.mapred.InputFormat;
-
-public class ProfileFactory {
-
-    private static final String HIVE_TEXT_PROFILE = "HiveText";
-    private static final String HIVE_RC_PROFILE = "HiveRC";
-    private static final String HIVE_ORC_PROFILE = "HiveORC";
-
-    public static String get(InputFormat inputFormat) throws Exception {
-        String profileName = null;
-        // TODO: Uncomment in process of HAWQ-1228 implementation
-        //if (inputFormat instanceof TextInputFormat) {
-        //    profileName = HIVE_TEXT_PROFILE;
-        //} else if (inputFormat instanceof RCFileInputFormat) {
-        //    profileName = HIVE_RC_PROFILE;
-        /*} else */if (inputFormat instanceof OrcInputFormat) {
-            profileName = HIVE_ORC_PROFILE;
-        }
-
-        return profileName;
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/rest/MetadataResource.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/rest/MetadataResource.java b/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/rest/MetadataResource.java
index 3f85bb8..5263ea2 100644
--- a/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/rest/MetadataResource.java
+++ b/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/rest/MetadataResource.java
@@ -50,7 +50,7 @@ import org.apache.hawq.pxf.service.utilities.SecuredHDFS;
  * Class enhances the API of the WEBHDFS REST server. Returns the metadata of a
  * given hcatalog table. <br>
  * Example for querying API FRAGMENTER from a web client:<br>
- * <code>curl -i "http://localhost:51200/pxf/{version}/Metadata/getTableMetadata?table=t1"</code>
+ * <code>curl -i "http://localhost:51200/pxf/{version}/Metadata/getMetadata?profile=PROFILE_NAME&pattern=OBJECT_PATTERN"</code>
  * <br>
  * /pxf/ is made part of the path when there is a webapp by that name in tomcat.
  */
@@ -69,7 +69,12 @@ public class MetadataResource extends RestResource {
      * Response Examples:<br>
      * For a table <code>default.t1</code> with 2 fields (a int, b float) will
      * be returned as:
-     * <code>{"PXFMetadata":[{"item":{"path":"default","name":"t1"},"fields":[{"name":"a","type":"int"},{"name":"b","type":"float"}]}]}</code>
+     * <code>{"PXFMetadata":[{"item":{"path":"default","name":"t1"},
+     * "fields":[{"name":"a","type":"int4","sourceType":"int","complexType":false},
+     * {"name":"b","type":"float8","sourceType":"double","complexType":false}],
+     * "outputFormats":["TEXT"],
+     * "outputParameters":{"DELIMITER":"1"}}]}
+     * </code>
      *
      * @param servletContext servlet context
      * @param headers http headers

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/rest/VersionResource.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/rest/VersionResource.java b/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/rest/VersionResource.java
index db9743e..c9f4d20 100644
--- a/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/rest/VersionResource.java
+++ b/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/rest/VersionResource.java
@@ -31,7 +31,7 @@ import org.apache.commons.logging.LogFactory;
 
 /**
  * PXF protocol version. Any call to PXF resources should include the current
- * version e.g. {@code ...pxf/v14/Bridge}
+ * version e.g. {@code ...pxf/v15/Bridge}
  */
 class Version {
     /**

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/utilities/ProtocolData.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/utilities/ProtocolData.java b/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/utilities/ProtocolData.java
index a0e63ce..dc2a110 100644
--- a/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/utilities/ProtocolData.java
+++ b/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/utilities/ProtocolData.java
@@ -71,7 +71,7 @@ public class ProtocolData extends InputData {
             filterString = getProperty("FILTER");
         }
 
-        parseFormat(getProperty("FORMAT"));
+        outputFormat = OutputFormat.valueOf(getProperty("FORMAT"));
 
         host = getProperty("URL-HOST");
         port = getIntProperty("URL-PORT");
@@ -359,26 +359,6 @@ public class ProtocolData extends InputData {
                 + threadSafeStr + "'." + " Usage: [TRUE|FALSE]");
     }
 
-    /**
-     * Sets the format type based on the input string. Allowed values are:
-     * "TEXT", "GPDBWritable".
-     *
-     * @param formatString format string
-     */
-    protected void parseFormat(String formatString) {
-        switch (formatString) {
-            case "TEXT":
-                outputFormat = OutputFormat.TEXT;
-                break;
-            case "GPDBWritable":
-                outputFormat = OutputFormat.BINARY;
-                break;
-            default:
-                throw new IllegalArgumentException(
-                        "Wrong value for greenplum.format " + formatString);
-        }
-    }
-
     /*
      * Sets the tuple description for the record
      * Attribute Projection information is optional
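
One behavioural consequence of dropping parseFormat() is that the FORMAT header must now match an OutputFormat constant name exactly, since Enum.valueOf is strict. The standalone illustration below uses a stand-in enum that only mimics the real org.apache.hawq.pxf.api.OutputFormat, which after this change appears to define TEXT and GPDBWritable.

enum OutputFormat { TEXT, GPDBWritable }

public class FormatHeaderDemo {
    public static void main(String[] args) {
        // Exact constant names parse fine ...
        System.out.println(OutputFormat.valueOf("GPDBWritable"));
        // ... anything else now surfaces as IllegalArgumentException rather than the
        // old "Wrong value for greenplum.format" error raised by parseFormat().
        try {
            OutputFormat.valueOf("Binary");
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}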

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-service/src/main/resources/pxf-profiles-default.xml
----------------------------------------------------------------------
diff --git a/pxf/pxf-service/src/main/resources/pxf-profiles-default.xml b/pxf/pxf-service/src/main/resources/pxf-profiles-default.xml
index 1edb6d5..d36f54b 100644
--- a/pxf/pxf-service/src/main/resources/pxf-profiles-default.xml
+++ b/pxf/pxf-service/src/main/resources/pxf-profiles-default.xml
@@ -43,12 +43,16 @@ under the License.
     </profile>
     <profile>
         <name>Hive</name>
-        <description>This profile is suitable for using when connecting to Hive</description>
+        <description>
+            This profile is suitable for use when connecting to Hive.
+            Supports GPDBWritable output format, as specified in FORMAT header parameter.
+        </description>
         <plugins>
             <fragmenter>org.apache.hawq.pxf.plugins.hive.HiveDataFragmenter</fragmenter>
             <accessor>org.apache.hawq.pxf.plugins.hive.HiveAccessor</accessor>
             <resolver>org.apache.hawq.pxf.plugins.hive.HiveResolver</resolver>
             <metadata>org.apache.hawq.pxf.plugins.hive.HiveMetadataFetcher</metadata>
+            <outputFormat>org.apache.hawq.pxf.service.io.GPDBWritable</outputFormat>
         </plugins>
     </profile>
     <profile>
@@ -57,12 +61,15 @@ under the License.
             and serialized with either the ColumnarSerDe or the LazyBinaryColumnarSerDe.
             It is much faster than the general purpose Hive profile.
             DELIMITER parameter is mandatory.
+            Supports both GPDBWritable and TEXT output formats, as specified in FORMAT header parameter.
+            Primarily optimized for the TEXT output format.
         </description>
         <plugins>
             <fragmenter>org.apache.hawq.pxf.plugins.hive.HiveInputFormatFragmenter</fragmenter>
             <accessor>org.apache.hawq.pxf.plugins.hive.HiveRCFileAccessor</accessor>
             <resolver>org.apache.hawq.pxf.plugins.hive.HiveColumnarSerdeResolver</resolver>
             <metadata>org.apache.hawq.pxf.plugins.hive.HiveMetadataFetcher</metadata>
+            <outputFormat>org.apache.hawq.pxf.service.io.Text</outputFormat>
         </plugins>
     </profile>
     <profile>
@@ -70,12 +77,15 @@ under the License.
         <description>This profile is suitable only for Hive tables stored as Text files.
             It is much faster than the general purpose Hive profile.
             DELIMITER parameter is mandatory.
+            Supports both GPDBWritable and TEXT output formats, as specified in FORMAT header parameter.
+            Primarily optimized for the TEXT output format.
         </description>
         <plugins>
             <fragmenter>org.apache.hawq.pxf.plugins.hive.HiveInputFormatFragmenter</fragmenter>
             <accessor>org.apache.hawq.pxf.plugins.hive.HiveLineBreakAccessor</accessor>
             <resolver>org.apache.hawq.pxf.plugins.hive.HiveStringPassResolver</resolver>
             <metadata>org.apache.hawq.pxf.plugins.hive.HiveMetadataFetcher</metadata>
+            <outputFormat>org.apache.hawq.pxf.service.io.Text</outputFormat>
         </plugins>
     </profile>
     <profile>
@@ -83,12 +93,14 @@ under the License.
         <description>This profile is suitable only for Hive tables stored in ORC files
             and serialized with either the ColumnarSerDe or the LazyBinaryColumnarSerDe.
             It is much faster than the general purpose Hive profile.
+            Supports GPDBWritable output format, as specified in FORMAT header parameter.
         </description>
         <plugins>
             <fragmenter>org.apache.hawq.pxf.plugins.hive.HiveInputFormatFragmenter</fragmenter>
             <accessor>org.apache.hawq.pxf.plugins.hive.HiveORCAccessor</accessor>
             <resolver>org.apache.hawq.pxf.plugins.hive.HiveORCSerdeResolver</resolver>
             <metadata>org.apache.hawq.pxf.plugins.hive.HiveMetadataFetcher</metadata>
+            <outputFormat>org.apache.hawq.pxf.service.io.GPDBWritable</outputFormat>
         </plugins>
     </profile>
     <profile>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-service/src/test/java/org/apache/hawq/pxf/service/MetadataResponseFormatterTest.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-service/src/test/java/org/apache/hawq/pxf/service/MetadataResponseFormatterTest.java b/pxf/pxf-service/src/test/java/org/apache/hawq/pxf/service/MetadataResponseFormatterTest.java
index 21bf423..546b42d 100644
--- a/pxf/pxf-service/src/test/java/org/apache/hawq/pxf/service/MetadataResponseFormatterTest.java
+++ b/pxf/pxf-service/src/test/java/org/apache/hawq/pxf/service/MetadataResponseFormatterTest.java
@@ -57,7 +57,7 @@ public class MetadataResponseFormatterTest {
         response = MetadataResponseFormatter.formatResponse(metadataList, "path.file");
         StringBuilder expected = new StringBuilder("{\"PXFMetadata\":[{");
         expected.append("\"item\":{\"path\":\"default\",\"name\":\"table1\"},")
-                .append("\"fields\":[{\"name\":\"field1\",\"type\":\"int8\",\"sourceType\":\"bigint\"},{\"name\":\"field2\",\"type\":\"text\",\"sourceType\":\"string\"}]}]}");
+                .append("\"fields\":[{\"name\":\"field1\",\"type\":\"int8\",\"sourceType\":\"bigint\",\"complexType\":false},{\"name\":\"field2\",\"type\":\"text\",\"sourceType\":\"string\",\"complexType\":false}]}]}");
 
         assertEquals(expected.toString(), convertResponseToString(response));
     }
@@ -75,7 +75,7 @@ public class MetadataResponseFormatterTest {
         response = MetadataResponseFormatter.formatResponse(metadataList, "path.file");
         StringBuilder expected = new StringBuilder("{\"PXFMetadata\":[{");
         expected.append("\"item\":{\"path\":\"default\",\"name\":\"table1\"},")
-                .append("\"fields\":[{\"name\":\"field1\",\"type\":\"int8\",\"sourceType\":\"bigint\"},{\"name\":\"field2\",\"type\":\"text\",\"sourceType\":\"string\"}]}]}");
+                .append("\"fields\":[{\"name\":\"field1\",\"type\":\"int8\",\"sourceType\":\"bigint\",\"complexType\":false},{\"name\":\"field2\",\"type\":\"text\",\"sourceType\":\"string\",\"complexType\":false}]}]}");
 
         assertEquals(expected.toString(), convertResponseToString(response));
     }
@@ -97,9 +97,9 @@ public class MetadataResponseFormatterTest {
         StringBuilder expected = new StringBuilder("{\"PXFMetadata\":[{");
         expected.append("\"item\":{\"path\":\"default\",\"name\":\"table1\"},")
                 .append("\"fields\":[")
-                .append("{\"name\":\"field1\",\"type\":\"int8\",\"sourceType\":\"bigint\"},")
-                .append("{\"name\":\"field2\",\"type\":\"numeric\",\"sourceType\":\"decimal\",\"modifiers\":[\"1349\",\"1789\"]},")
-                .append("{\"name\":\"field3\",\"type\":\"bpchar\",\"sourceType\":\"char\",\"modifiers\":[\"50\"]}")
+                .append("{\"name\":\"field1\",\"type\":\"int8\",\"sourceType\":\"bigint\",\"complexType\":false},")
+                .append("{\"name\":\"field2\",\"type\":\"numeric\",\"sourceType\":\"decimal\",\"modifiers\":[\"1349\",\"1789\"],\"complexType\":false},")
+                .append("{\"name\":\"field3\",\"type\":\"bpchar\",\"sourceType\":\"char\",\"modifiers\":[\"50\"],\"complexType\":false}")
                 .append("]}]}");
 
         assertEquals(expected.toString(), convertResponseToString(response));
@@ -118,7 +118,7 @@ public class MetadataResponseFormatterTest {
         StringBuilder expected = new StringBuilder("{\"PXFMetadata\":[{");
         expected.append("\"item\":{\"path\":\"default\",\"name\":\"table1\"},")
                 .append("\"fields\":[")
-                .append("{\"name\":\"field1\",\"type\":\"float8\",\"sourceType\":\"double\"}")
+                .append("{\"name\":\"field1\",\"type\":\"float8\",\"sourceType\":\"double\",\"complexType\":false}")
                 .append("]}]}");
 
         assertEquals(expected.toString(), convertResponseToString(response));
@@ -199,7 +199,7 @@ public class MetadataResponseFormatterTest {
                 expected.append(",");
             }
             expected.append("{\"item\":{\"path\":\"default\",\"name\":\"table").append(i).append("\"},");
-            expected.append("\"fields\":[{\"name\":\"field1\",\"type\":\"int8\",\"sourceType\":\"bigint\"},{\"name\":\"field2\",\"type\":\"text\",\"sourceType\":\"string\"}]}");
+            expected.append("\"fields\":[{\"name\":\"field1\",\"type\":\"int8\",\"sourceType\":\"bigint\",\"complexType\":false},{\"name\":\"field2\",\"type\":\"text\",\"sourceType\":\"string\",\"complexType\":false}]}");
         }
         expected.append("]}");
 
@@ -226,7 +226,7 @@ public class MetadataResponseFormatterTest {
                 expected.append(",");
             }
             expected.append("{\"item\":{\"path\":\"default").append(i).append("\",\"name\":\"table").append(i).append("\"},");
-            expected.append("\"fields\":[{\"name\":\"field1\",\"type\":\"int8\",\"sourceType\":\"bigint\"},{\"name\":\"field2\",\"type\":\"text\",\"sourceType\":\"string\"}]}");
+            expected.append("\"fields\":[{\"name\":\"field1\",\"type\":\"int8\",\"sourceType\":\"bigint\",\"complexType\":false},{\"name\":\"field2\",\"type\":\"text\",\"sourceType\":\"string\",\"complexType\":false}]}");
         }
         expected.append("]}");
 

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/src/backend/access/external/fileam.c
----------------------------------------------------------------------
diff --git a/src/backend/access/external/fileam.c b/src/backend/access/external/fileam.c
index 70a115a..d16d516 100644
--- a/src/backend/access/external/fileam.c
+++ b/src/backend/access/external/fileam.c
@@ -461,8 +461,11 @@ external_stopscan(FileScanDesc scan)
 ExternalSelectDesc
 external_getnext_init(PlanState *state) {
 	ExternalSelectDesc desc = (ExternalSelectDesc) palloc0(sizeof(ExternalSelectDescData));
+
 	if (state != NULL)
+	{
 		desc->projInfo = state->ps_ProjInfo;
+	}
 	return desc;
 }
 

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/src/backend/access/external/pxfheaders.c
----------------------------------------------------------------------
diff --git a/src/backend/access/external/pxfheaders.c b/src/backend/access/external/pxfheaders.c
index 2381044..a60dc48 100644
--- a/src/backend/access/external/pxfheaders.c
+++ b/src/backend/access/external/pxfheaders.c
@@ -56,7 +56,7 @@ void build_http_header(PxfInputData *input)
 		/* format */
 		ExtTableEntry *exttbl = GetExtTableEntry(rel->rd_id);
         /* pxf treats CSV as TEXT */
-		char* format = (fmttype_is_text(exttbl->fmtcode) || fmttype_is_csv(exttbl->fmtcode)) ? "TEXT":"GPDBWritable";
+		char* format = get_format_name(exttbl->fmtcode);
 		churl_headers_append(headers, "X-GP-FORMAT", format);
 		
 		/* Record fields - name and type of each field */
@@ -338,3 +338,22 @@ static void add_remote_credentials(CHURL_HEADERS headers)
 	if (pxf_remote_service_secret != NULL)
 	churl_headers_append(headers, "X-GP-REMOTE-PASS", pxf_remote_service_secret);
 }
+
+char* get_format_name(char fmtcode)
+{
+	char *formatName = NULL;
+
+	if (fmttype_is_text(fmtcode) || fmttype_is_csv(fmtcode))
+	{
+		formatName = TextFormatName;
+	} else if (fmttype_is_custom(fmtcode))
+	{
+		formatName = GpdbWritableFormatName;
+	} else {
+		ereport(ERROR,
+			(errcode(ERRCODE_INTERNAL_ERROR),
+			 errmsg("Unable to get format name for format code: %c", fmtcode)));
+	}
+
+	return formatName;
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/src/backend/access/external/test/pxfheaders_test.c
----------------------------------------------------------------------
diff --git a/src/backend/access/external/test/pxfheaders_test.c b/src/backend/access/external/test/pxfheaders_test.c
index 454ecdc..851483d 100644
--- a/src/backend/access/external/test/pxfheaders_test.c
+++ b/src/backend/access/external/test/pxfheaders_test.c
@@ -125,6 +125,22 @@ test__build_http_header__remote_credentials_are_not_null(void **state)
 	build_http_header(input_data);
 }
 
+void
+test__get_format_name(void **state)
+{
+	char fmtcode = 't';
+	char *formatName = get_format_name(fmtcode);
+	assert_string_equal(TextFormatName, formatName);
+
+	fmtcode = 'c';
+	formatName = get_format_name(fmtcode);
+	assert_string_equal(TextFormatName, formatName);
+
+	fmtcode = 'b';
+	formatName = get_format_name(fmtcode);
+	assert_string_equal(GpdbWritableFormatName, formatName);
+}
+
 /*
  * Add an expect clause on a churl_headers_append with given
  * key and value
@@ -244,6 +260,8 @@ main(int argc, char* argv[])
 		unit_test_setup_teardown(test__build_http_header__remote_login_is_not_null, 
 								 common_setup, common_teardown),
 		unit_test_setup_teardown(test__build_http_header__remote_credentials_are_not_null, 
+								 common_setup, common_teardown),
+		unit_test_setup_teardown(test__get_format_name,
 								 common_setup, common_teardown)
 	};
 

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/src/backend/catalog/external/externalmd.c
----------------------------------------------------------------------
diff --git a/src/backend/catalog/external/externalmd.c b/src/backend/catalog/external/externalmd.c
index 0e39d25..3a62f17 100644
--- a/src/backend/catalog/external/externalmd.c
+++ b/src/backend/catalog/external/externalmd.c
@@ -57,6 +57,9 @@ static void LoadDistributionPolicy(Oid relid, PxfItem *pxfItem);
 static void LoadExtTable(Oid relid, PxfItem *pxfItem);
 static void LoadColumns(Oid relid, List *columns);
 static int ComputeTypeMod(Oid typeOid, const char *colname, int *typemod, int nTypeMod);
+static Datum GetFormatTypeForProfile(const List *outputFormats);
+static Datum GetFormatOptionsForProfile(const List *outputFormats, int delimiter);
+static Datum GetLocationForFormat(char *profile, List *outputFormats, char *pxf_service_address, char *path, char *name, int delimiter);
 
 const int maxNumTypeModifiers = 2;
 /*
@@ -124,11 +127,33 @@ static PxfItem *ParsePxfItem(struct json_object *pxfMD, char* profile)
 		ereport(ERROR,
 			(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
 			 errmsg("Could not parse PXF item, expected not null value for attribute \"name\"")));
-
 	pxfItem->profile = profile;
 	pxfItem->path = pstrdup(json_object_get_string(itemPath));
 	pxfItem->name = pstrdup(json_object_get_string(itemName));
-	
+
+	/* parse output formats */
+	struct json_object *jsonOutputFormats = json_object_object_get(pxfMD, "outputFormats");
+
+	if (NULL != jsonOutputFormats)
+	{
+		const int numOutputFormats = json_object_array_length(jsonOutputFormats);
+		for (int i = 0; i < numOutputFormats; i++)
+		{
+			PxfField *pxfField = palloc0(sizeof(PxfField));
+			struct json_object *jsonOutputFormat = json_object_array_get_idx(jsonOutputFormats, i);
+			char *outputFormat = pstrdup(json_object_get_string(jsonOutputFormat));
+			pxfItem->outputFormats = lappend(pxfItem->outputFormats, outputFormat);
+		}
+	}
+
+	/* parse delimiter */
+	struct json_object *jsonOutputParameters = json_object_object_get(pxfMD, "outputParameters");
+	if (NULL != jsonOutputParameters)
+	{
+		struct json_object *outputParameterDelimiter = json_object_object_get(jsonOutputParameters, "DELIMITER");
+		pxfItem->delimiter = atoi(pstrdup(json_object_get_string(outputParameterDelimiter)));
+	}
+
 	elog(DEBUG1, "Parsed item %s, namespace %s", itemName, itemPath);
 		
 	/* parse columns */
@@ -445,36 +470,10 @@ static void LoadExtTable(Oid relid, PxfItem *pxfItem)
 		values[i] = (Datum) 0;
 	}
 
-	/* location - should be an array of text with one element:
-	 * pxf://<ip:port/namaservice>/<hive db>.<hive table>?Profile=Hive */
-	StringInfoData locationStr;
-	initStringInfo(&locationStr);
-	appendStringInfo(&locationStr, "pxf://%s/%s.%s?Profile=%s",
-			pxf_service_address, pxfItem->path, pxfItem->name, pxfItem->profile);
-	Size len = VARHDRSZ + locationStr.len;
-	/* +1 leaves room for sprintf's trailing null */
-	text *t = (text *) palloc(len + 1);
-	SET_VARSIZE(t, len);
-	sprintf((char *) VARDATA(t), "%s", locationStr.data);
-	ArrayBuildState *astate = NULL;
-	astate = accumArrayResult(astate, PointerGetDatum(t),
-							  false, TEXTOID,
-							  CurrentMemoryContext);
-	pfree(locationStr.data);
-	Assert(NULL != astate);
-	Datum location = makeArrayResult(astate, CurrentMemoryContext);
-
-	/* format options - should be "formatter 'pxfwritable_import'" */
-	StringInfoData formatStr;
-	initStringInfo(&formatStr);
-	appendStringInfo(&formatStr, "formatter 'pxfwritable_import'");
-	Datum format_opts = DirectFunctionCall1(textin, CStringGetDatum(formatStr.data));
-	pfree(formatStr.data);
-
 	values[Anum_pg_exttable_reloid - 1] = ObjectIdGetDatum(relid);
-	values[Anum_pg_exttable_location - 1] = location;
-	values[Anum_pg_exttable_fmttype - 1] = CharGetDatum('b' /* binary */);
-	values[Anum_pg_exttable_fmtopts - 1] = format_opts;
+	values[Anum_pg_exttable_location - 1] = GetLocationForFormat(pxfItem->profile, pxfItem->outputFormats, pxf_service_address, pxfItem->path, pxfItem->name, pxfItem->delimiter);
+	values[Anum_pg_exttable_fmttype - 1] = GetFormatTypeForProfile(pxfItem->outputFormats);
+	values[Anum_pg_exttable_fmtopts - 1] = GetFormatOptionsForProfile(pxfItem->outputFormats, pxfItem->delimiter);
 	nulls[Anum_pg_exttable_command - 1] = true;
 	nulls[Anum_pg_exttable_rejectlimit - 1] = true;
 	nulls[Anum_pg_exttable_rejectlimittype - 1] = true;
@@ -631,3 +630,79 @@ static int ComputeTypeMod(Oid typeOid, const char *colname, int *typemod, int nT
 	return VARHDRSZ + result;
 }
 
+static Datum GetFormatTypeForProfile(const List *outputFormats)
+{
+
+	/* if the table is homogeneous and its output format is text, use text */
+	if (list_length(outputFormats) == 1 && strcmp(lfirst(list_head(outputFormats)), TextFormatName) == 0)
+	{
+		return CharGetDatum(TextFormatType);
+	} else
+	{
+		return CharGetDatum(CustomFormatType);
+	}
+}
+
+static Datum GetFormatOptionsForProfile(const List *outputFormats, int delimiter)
+{
+	StringInfoData formatStr;
+	initStringInfo(&formatStr);
+
+	/* "delimiter 'delimiter' null '\\N' escape '\\'"*/
+	char formatArr[35] = { 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65,
+			0x72, 0x20, 0x27, delimiter, 0x27, 0x20, 0x6e, 0x75, 0x6c, 0x6c,
+			0x20, 0x27, 0x5c, 0x4e, 0x27, 0x20, 0x65, 0x73, 0x63, 0x61, 0x70,
+			0x65, 0x20, 0x27, 0x5c, 0x27, 0x00 };
+
+	if (list_length(outputFormats) == 1 && strcmp(lfirst(list_head(outputFormats)),TextFormatName) == 0)
+	{
+		appendStringInfo(&formatStr, "%s", formatArr);
+	} else {
+		appendStringInfo(&formatStr, "formatter 'pxfwritable_import'");
+	}
+	Datum format_opts = DirectFunctionCall1(textin, CStringGetDatum(formatStr.data));
+	pfree(formatStr.data);
+	return format_opts;
+}
+
+/* location - should be an array of text with one element:
+ * pxf://<ip:port/nameservice>/<path>.<name>?Profile=profileName&delimiter=delimiterCode */
+static Datum GetLocationForFormat(char *profile, List *outputFormats, char *pxf_service_address, char *path, char *name, int delimiter)
+{
+	StringInfoData locationStr;
+	initStringInfo(&locationStr);
+	appendStringInfo(&locationStr, "pxf://%s/%s.%s?Profile=%s", pxf_service_address, path, name, profile);
+	bool hasTextOutputFormat = false;
+	ListCell *lc = NULL;
+	foreach (lc, outputFormats)
+	{
+		char *outputFormat = (char *) lfirst(lc);
+		if (strcmp(outputFormat, TextFormatName) == 0)
+		{
+			hasTextOutputFormat = true;
+			break;
+		}
+	}
+	if (delimiter)
+	{
+		appendStringInfo(&locationStr, "&delimiter=%cx%02x", '\\', delimiter);
+	} else if (hasTextOutputFormat)
+	{
+		ereport(ERROR,
+			(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
+			 errmsg("delimiter attribute is mandatory for output format \"TEXT\"")));
+	}
+	Size len = VARHDRSZ + locationStr.len;
+	/* +1 leaves room for sprintf's trailing null */
+	text *t = (text *) palloc(len + 1);
+	SET_VARSIZE(t, len);
+	sprintf((char *) VARDATA(t), "%s", locationStr.data);
+	ArrayBuildState *astate = NULL;
+	astate = accumArrayResult(astate, PointerGetDatum(t),
+							  false, TEXTOID,
+							  CurrentMemoryContext);
+	pfree(locationStr.data);
+	Assert(NULL != astate);
+	Datum location = makeArrayResult(astate, CurrentMemoryContext);
+	return location;
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/src/bin/gpfusion/gpbridgeapi.c
----------------------------------------------------------------------
diff --git a/src/bin/gpfusion/gpbridgeapi.c b/src/bin/gpfusion/gpbridgeapi.c
index b524df8..cf4dd84 100644
--- a/src/bin/gpfusion/gpbridgeapi.c
+++ b/src/bin/gpfusion/gpbridgeapi.c
@@ -68,7 +68,6 @@ void	free_token_resources(PxfInputData *inputData);
 Datum gpbridge_import(PG_FUNCTION_ARGS)
 {
 	gpbridge_check_inside_extproto(fcinfo, "gpbridge_import");
-//	ExternalSelectDesc desc = EXTPROTOCOL_GET_SELECTDESC(fcinfo);
 
 	if (gpbridge_last_call(fcinfo))
 		PG_RETURN_INT32(gpbridge_cleanup(fcinfo));
@@ -226,14 +225,17 @@ void set_current_fragment_headers(gphadoop_context* context)
 		churl_headers_remove(context->churl_headers, "X-GP-FRAGMENT-USER-DATA", true);
 	}
 
-	/* if current fragment has optimal profile set it*/
 	if (frag_data->profile)
 	{
+		/* if the current fragment has an optimal profile, set it */
 		churl_headers_override(context->churl_headers, "X-GP-PROFILE", frag_data->profile);
+		elog(DEBUG2, "pxf: set_current_fragment_headers: using profile: %s", frag_data->profile);
+
 	} else if (context->gphd_uri->profile)
 	{
 		/* if current fragment doesn't have any optimal profile, set to use profile from url */
 		churl_headers_override(context->churl_headers, "X-GP-PROFILE", context->gphd_uri->profile);
+		elog(DEBUG2, "pxf: set_current_fragment_headers: using profile: %s", context->gphd_uri->profile);
 	}
 	/* if there is no profile passed in url, we expect to have accessor+fragmenter+resolver so no action needed by this point */
 

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/src/include/access/hd_work_mgr.h
----------------------------------------------------------------------
diff --git a/src/include/access/hd_work_mgr.h b/src/include/access/hd_work_mgr.h
index cab8ca7..ea4c6ef 100644
--- a/src/include/access/hd_work_mgr.h
+++ b/src/include/access/hd_work_mgr.h
@@ -48,5 +48,7 @@ PxfFragmentStatsElem *get_pxf_fragments_statistics(char *uri, Relation rel);
 List *get_pxf_item_metadata(char *profile, char *pattern, Oid dboid);
 
 #define HiveProfileName "Hive"
+#define HiveTextProfileName "HiveText"
+#define HiveRCProfileName "HiveRC"
 
 #endif   /* HDWORKMGR_H */

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/src/include/access/pxfheaders.h
----------------------------------------------------------------------
diff --git a/src/include/access/pxfheaders.h b/src/include/access/pxfheaders.h
index 410a077..f4adc6c 100644
--- a/src/include/access/pxfheaders.h
+++ b/src/include/access/pxfheaders.h
@@ -49,5 +49,6 @@ typedef struct sPxfInputData
 } PxfInputData;
 
 void build_http_header(PxfInputData *input);
+char* get_format_name(char fmtcode);
 
 #endif

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/src/include/access/pxfuriparser.h
----------------------------------------------------------------------
diff --git a/src/include/access/pxfuriparser.h b/src/include/access/pxfuriparser.h
index ac614c5..77ad8b0 100644
--- a/src/include/access/pxfuriparser.h
+++ b/src/include/access/pxfuriparser.h
@@ -30,7 +30,7 @@
  * All PXF's resources are under /PXF_SERVICE_PREFIX/PXF_VERSION/...
  */
 #define PXF_SERVICE_PREFIX "pxf"
-#define PXF_VERSION "v14" /* PXF version */
+#define PXF_VERSION "v15" /* PXF version */
 
 /*
  * FragmentData - describes a single Hadoop file split / HBase table region

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/src/include/catalog/external/itemmd.h
----------------------------------------------------------------------
diff --git a/src/include/catalog/external/itemmd.h b/src/include/catalog/external/itemmd.h
index e6dad63..d9f8721 100644
--- a/src/include/catalog/external/itemmd.h
+++ b/src/include/catalog/external/itemmd.h
@@ -67,6 +67,11 @@ typedef struct PxfItem
 	
 	/* fields */
 	List *fields;
+
+	/* output formats*/
+	List *outputFormats;
+
+	int delimiter;
 } PxfItem;
 
 

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/src/include/catalog/pg_exttable.h
----------------------------------------------------------------------
diff --git a/src/include/catalog/pg_exttable.h b/src/include/catalog/pg_exttable.h
index 3a0fadd..3256bb9 100644
--- a/src/include/catalog/pg_exttable.h
+++ b/src/include/catalog/pg_exttable.h
@@ -164,8 +164,16 @@ GetExtTableEntry(Oid relid);
 extern void
 RemoveExtTableEntry(Oid relid);
 
-#define fmttype_is_custom(c) (c == 'b')
-#define fmttype_is_text(c)   (c == 't')
-#define fmttype_is_csv(c)    (c == 'c')
+#define CustomFormatType 'b'
+#define TextFormatType 't'
+#define CsvFormatType 'c'
+
+/* PXF formats*/
+#define GpdbWritableFormatName "GPDBWritable"
+#define TextFormatName "TEXT"
+
+#define fmttype_is_custom(c) (c == CustomFormatType)
+#define fmttype_is_text(c)   (c == TextFormatType)
+#define fmttype_is_csv(c)    (c == CsvFormatType)
 
 #endif /* PG_EXTTABLE_H */
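
pxfheaders.h above gains a get_format_name() declaration, and pg_exttable.h now names the PXF formats; a plausible, purely illustrative mapping from the one-character format code to those names could look like the sketch below (the commit's actual implementation may differ):

    #include <stdio.h>

    #define CustomFormatType 'b'
    #define TextFormatType   't'
    #define CsvFormatType    'c'
    #define GpdbWritableFormatName "GPDBWritable"
    #define TextFormatName         "TEXT"

    /* hypothetical body for the get_format_name() declared in pxfheaders.h */
    static const char *get_format_name(char fmtcode)
    {
        if (fmtcode == CustomFormatType)
            return GpdbWritableFormatName;              /* 'b' -> "GPDBWritable" */
        if (fmtcode == TextFormatType || fmtcode == CsvFormatType)
            return TextFormatName;                      /* 't'/'c' -> "TEXT" (assumption) */
        return NULL;                                    /* unknown format code */
    }

    int main(void)
    {
        printf("%s %s\n", get_format_name('b'), get_format_name('t'));
        return 0;
    }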

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/src/test/regress/data/hcatalog/single_table.json
----------------------------------------------------------------------
diff --git a/src/test/regress/data/hcatalog/single_table.json b/src/test/regress/data/hcatalog/single_table.json
index b571e5d..53cce73 100644
--- a/src/test/regress/data/hcatalog/single_table.json
+++ b/src/test/regress/data/hcatalog/single_table.json
@@ -1 +1 @@
-{"PXFMetadata":[{"item":{"path":"default","name":"mytable"},"fields":[{"name":"s1","type":"text","sourceType":"string"},{"name":"s2","type":"text","sourceType":"string"},{"name":"n1","type":"int4","sourceType":"int"},{"name":"d1","type":"float8","sourceType":"double"},{"name":"dc1","type":"numeric","modifiers":["38","18"],"sourceType":"decimal"},{"name":"tm","type":"timestamp","sourceType":"timestamp"},{"name":"f","type":"float4","sourceType":"float"},{"name":"bg","type":"int8","sourceType":"bigint"},{"name":"b","type":"bool","sourceType":"boolean"},{"name":"tn","type":"int2","sourceType":"tinyint"},{"name":"sml","type":"int2","sourceType":"tinyint"},{"name":"dt","type":"date","sourceType":"date"},{"name":"vc1","type":"varchar","modifiers":["5"],"sourceType":"varchar"},{"name":"c1","type":"bpchar","modifiers":["3"],"sourceType":"char"},{"name":"bin","type":"bytea","sourceType":"binary"}]}]}
+{"PXFMetadata":[{"item":{"path":"default","name":"mytable"},"fields":[{"name":"s1","type":"text","sourceType":"string"},{"name":"s2","type":"text","sourceType":"string"},{"name":"n1","type":"int4","sourceType":"int"},{"name":"d1","type":"float8","sourceType":"double"},{"name":"dc1","type":"numeric","modifiers":["38","18"],"sourceType":"decimal"},{"name":"tm","type":"timestamp","sourceType":"timestamp"},{"name":"f","type":"float4","sourceType":"float"},{"name":"bg","type":"int8","sourceType":"bigint"},{"name":"b","type":"bool","sourceType":"boolean"},{"name":"tn","type":"int2","sourceType":"tinyint"},{"name":"sml","type":"int2","sourceType":"tinyint"},{"name":"dt","type":"date","sourceType":"date"},{"name":"vc1","type":"varchar","modifiers":["5"],"sourceType":"varchar"},{"name":"c1","type":"bpchar","modifiers":["3"],"sourceType":"char"},{"name":"bin","type":"bytea","sourceType":"binary"}],"outputFormats":["GPDBWritable"],"outputParameters":{"DELIMITER":"1"}}]}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/src/test/regress/data/hcatalog/single_table_text.json
----------------------------------------------------------------------
diff --git a/src/test/regress/data/hcatalog/single_table_text.json b/src/test/regress/data/hcatalog/single_table_text.json
new file mode 100644
index 0000000..dbc301a
--- /dev/null
+++ b/src/test/regress/data/hcatalog/single_table_text.json
@@ -0,0 +1 @@
+{"PXFMetadata":[{"item":{"path":"default","name":"mytable"},"fields":[{"name":"s1","type":"text","sourceType":"string"},{"name":"s2","type":"text","sourceType":"string"},{"name":"n1","type":"int4","sourceType":"int"},{"name":"d1","type":"float8","sourceType":"double"},{"name":"dc1","type":"numeric","modifiers":["38","18"],"sourceType":"decimal"},{"name":"tm","type":"timestamp","sourceType":"timestamp"},{"name":"f","type":"float4","sourceType":"float"},{"name":"bg","type":"int8","sourceType":"bigint"},{"name":"b","type":"bool","sourceType":"boolean"},{"name":"tn","type":"int2","sourceType":"tinyint"},{"name":"sml","type":"int2","sourceType":"tinyint"},{"name":"dt","type":"date","sourceType":"date"},{"name":"vc1","type":"varchar","modifiers":["5"],"sourceType":"varchar"},{"name":"c1","type":"bpchar","modifiers":["3"],"sourceType":"char"},{"name":"bin","type":"bytea","sourceType":"binary"}],"outputFormats":["TEXT"],"outputParameters":{"DELIMITER":"42"}}]}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/src/test/regress/input/json_load.source
----------------------------------------------------------------------
diff --git a/src/test/regress/input/json_load.source b/src/test/regress/input/json_load.source
index 6dcef8a..c6f7120 100644
--- a/src/test/regress/input/json_load.source
+++ b/src/test/regress/input/json_load.source
@@ -29,7 +29,7 @@ CREATE OR REPLACE FUNCTION search_policy(tblname text)
   AS '@abs_builddir@/regress@DLSUFFIX@', 'caql_scan_in_memory_gp_distribution_policy';
 
 CREATE OR REPLACE FUNCTION search_exttable(tblname text) 
-  RETURNS table(oid "oid", relname text, location text)
+  RETURNS table(oid "oid", relname text, location text, fmttype text, fmtopts text)
   LANGUAGE C volatile NO SQL
   AS '@abs_builddir@/regress@DLSUFFIX@', 'caql_scan_in_memory_pg_exttable';
 
@@ -59,11 +59,17 @@ SELECT nspname, nspdboid, count(distinct oid) from search_namespace('default') g
 SELECT relname, relkind from search_table('mytable');
 SELECT relname from search_type('mytable');
 SELECT relname, policy_atts from search_policy('mytable');
-SELECT relname, location from search_exttable('mytable');
+SELECT relname, location, fmttype, fmtopts from search_exttable('mytable');
 SELECT relname, relkind from search_table('mytable') where oid >= min_external_oid();
 SELECT * from search_attribute('mytable');
 END TRANSACTION;
 
+-- positive test case with a single table, using text format and custom delimiter
+BEGIN TRANSACTION;
+SELECT load_json_data('@abs_builddir@/data/hcatalog/single_table_text.json');
+SELECT relname, location, fmttype, fmtopts from search_exttable('mytable');
+END TRANSACTION;
+
 -- positive test case with multiple tables
 BEGIN TRANSACTION;
 SELECT load_json_data('@abs_builddir@/data/hcatalog/multi_table.json');
@@ -73,7 +79,7 @@ SELECT nspname, nspdboid, count(distinct oid) from search_namespace('db2') where
 SELECT relname, relkind from search_table('ht1') where oid >= min_external_oid();
 SELECT relname, count(oid) from search_type('ht1') group by relname, namespace;
 SELECT relname, policy_atts from search_policy('ht1');
-SELECT relname, location from search_exttable('ht1');
+SELECT relname, location, fmttype, fmtopts from search_exttable('ht1');
 END TRANSACTION;
 
 -- negative test: duplicated tables

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/src/test/regress/json_utils.c
----------------------------------------------------------------------
diff --git a/src/test/regress/json_utils.c b/src/test/regress/json_utils.c
index 77dc0b5..0028447 100644
--- a/src/test/regress/json_utils.c
+++ b/src/test/regress/json_utils.c
@@ -26,6 +26,7 @@
  * number of output columns for the UDFs for scanning in memory catalog tables
  */
 #define NUM_COLS 3
+#define NUM_COLS_EXTTABLE 5
 
 static 
 char *read_file(const char *filename);
@@ -430,13 +431,17 @@ caql_scan_in_memory_pg_exttable(PG_FUNCTION_ARGS)
 		/* switch context when allocating stuff to be used in later calls */
 		oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
 
-		tupdesc = CreateTemplateTupleDesc(NUM_COLS, false);
+		tupdesc = CreateTemplateTupleDesc(NUM_COLS_EXTTABLE, false);
 		TupleDescInitEntry(tupdesc, (AttrNumber) 1, "exttableoid",
 						   OIDOID, -1, 0);
 		TupleDescInitEntry(tupdesc, (AttrNumber) 2, "exttablename",
 						   TEXTOID, -1, 0);
 		TupleDescInitEntry(tupdesc, (AttrNumber) 3, "exttablelocation",
 						   TEXTOID, -1, 0);
+		TupleDescInitEntry(tupdesc, (AttrNumber) 4, "exttableformat",
+						   TEXTOID, -1, 0);
+		TupleDescInitEntry(tupdesc, (AttrNumber) 5, "exttableformatoptions",
+						   TEXTOID, -1, 0);
 		funcctx->tuple_desc = BlessTupleDesc(tupdesc);
 
 		/* iterate on pg_class and query
@@ -458,8 +463,8 @@ caql_scan_in_memory_pg_exttable(PG_FUNCTION_ARGS)
 
 	if (NULL != (pgclasstup = caql_getnext(pcqCtx)))
 	{
-		Datum values[NUM_COLS];
-		bool nulls[NUM_COLS];
+		Datum values[NUM_COLS_EXTTABLE];
+		bool nulls[NUM_COLS_EXTTABLE];
 
 		/* create tuples for pg_exttable table */
 		cqContext* pcqCtx1 = caql_beginscan(
@@ -482,6 +487,8 @@ caql_scan_in_memory_pg_exttable(PG_FUNCTION_ARGS)
 		nulls[1]  = false;
 
 		Datum locations = tuple_getattr(readtup, tupleDesc, Anum_pg_exttable_location);
+		Datum fmttype = tuple_getattr(readtup, tupleDesc, Anum_pg_exttable_fmttype);
+		Datum fmtopts = tuple_getattr(readtup, tupleDesc, Anum_pg_exttable_fmtopts);
 		Datum* elems = NULL;
 		int nelems;
 		deconstruct_array(DatumGetArrayTypeP(locations),
@@ -490,10 +497,19 @@ caql_scan_in_memory_pg_exttable(PG_FUNCTION_ARGS)
 		Assert(nelems > 0);
 		char *loc_str = DatumGetCString(DirectFunctionCall1(textout, elems[0]));
 		text *t2 = cstring_to_text(loc_str);
-
 		values[2] = PointerGetDatum(t2);
 		nulls[2]  = false;
 
+		char fmttype_chr = DatumGetChar(fmttype);
+		text *t3 = cstring_to_text_with_len(&fmttype_chr, 1);
+		values[3] = PointerGetDatum(t3);
+		nulls[3]  = false;
+
+		char *fmtopts_str = DatumGetCString(fmtopts);
+		text *t4 = cstring_to_text(fmtopts_str);
+		values[4] = PointerGetDatum(t4);
+		nulls[4]  = false;
+
 		/* build tuple */
 		restuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
 

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/src/test/regress/output/json_load.source
----------------------------------------------------------------------
diff --git a/src/test/regress/output/json_load.source b/src/test/regress/output/json_load.source
index 5bec43e..1717603 100644
--- a/src/test/regress/output/json_load.source
+++ b/src/test/regress/output/json_load.source
@@ -22,7 +22,7 @@ CREATE OR REPLACE FUNCTION search_policy(tblname text)
   LANGUAGE C volatile NO SQL
   AS '@abs_builddir@/regress@DLSUFFIX@', 'caql_scan_in_memory_gp_distribution_policy';
 CREATE OR REPLACE FUNCTION search_exttable(tblname text) 
-  RETURNS table(oid "oid", relname text, location text)
+  RETURNS table(oid "oid", relname text, location text, fmttype text, fmtopts text)
   LANGUAGE C volatile NO SQL
   AS '@abs_builddir@/regress@DLSUFFIX@', 'caql_scan_in_memory_pg_exttable';
 CREATE OR REPLACE FUNCTION search_attribute(tblname text) 
@@ -99,10 +99,10 @@ SELECT relname, policy_atts from search_policy('mytable');
  mytable | null
 (1 row)
 
-SELECT relname, location from search_exttable('mytable');
- relname |                      location                      
----------+----------------------------------------------------
- mytable | pxf://localhost:51200/default.mytable?Profile=Hive
+SELECT relname, location, fmttype, fmtopts from search_exttable('mytable');
+ relname |                             location                              | fmttype |            fmtopts             
+---------+-------------------------------------------------------------------+---------+--------------------------------
+ mytable | pxf://localhost:51200/default.mytable?Profile=Hive&delimiter=\x01 | b       | formatter 'pxfwritable_import'
 (1 row)
 
 SELECT relname, relkind from search_table('mytable') where oid >= min_external_oid();
@@ -132,6 +132,21 @@ SELECT * from search_attribute('mytable');
 (15 rows)
 
 END TRANSACTION;
+-- positive test case with a single table, using text format and custom delimiter
+BEGIN TRANSACTION;
+SELECT load_json_data('@abs_builddir@/data/hcatalog/single_table_text.json');
+  load_json_data  
+------------------
+ default.mytable 
+(1 row)
+
+SELECT relname, location, fmttype, fmtopts from search_exttable('mytable');
+ relname |                             location                              | fmttype |            fmtopts                 
+---------+-------------------------------------------------------------------+---------+------------------------------------
+ mytable | pxf://localhost:51200/default.mytable?Profile=Hive&delimiter=\x2a | t       | delimiter '*' null '\N' escape '\'
+(1 row)
+
+END TRANSACTION;
 -- positive test case with multiple tables
 BEGIN TRANSACTION;
 SELECT load_json_data('@abs_builddir@/data/hcatalog/multi_table.json');
@@ -173,11 +188,11 @@ SELECT relname, policy_atts from search_policy('ht1');
  ht1     | null
 (2 rows)
 
-SELECT relname, location from search_exttable('ht1');
- relname |                  location                  
----------+--------------------------------------------
- ht1     | pxf://localhost:51200/db1.ht1?Profile=Hive
- ht1     | pxf://localhost:51200/db2.ht1?Profile=Hive
+SELECT relname, location, fmttype, fmtopts from search_exttable('ht1');
+ relname |                  location                  | fmttype |            fmtopts             
+---------+--------------------------------------------+---------+--------------------------------
+ ht1     | pxf://localhost:51200/db1.ht1?Profile=Hive | b       | formatter 'pxfwritable_import'
+ ht1     | pxf://localhost:51200/db2.ht1?Profile=Hive | b       | formatter 'pxfwritable_import'
 (2 rows)
 
 END TRANSACTION;


[23/50] [abbrv] incubator-hawq git commit: HAWQ-1274. Add apache rat check in travis CI.

Posted by es...@apache.org.
HAWQ-1274. Add apache rat check in travis CI.


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/0bc2c8c0
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/0bc2c8c0
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/0bc2c8c0

Branch: refs/heads/2.1.0.0-incubating
Commit: 0bc2c8c01d942a0be14323b02cf34df4c1953d22
Parents: 1cb2909
Author: xunzhang <xu...@gmail.com>
Authored: Sat Jan 14 01:50:53 2017 -0500
Committer: xunzhang <xu...@gmail.com>
Committed: Sat Jan 14 09:41:16 2017 -0500

----------------------------------------------------------------------
 .travis.yml | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/0bc2c8c0/.travis.yml
----------------------------------------------------------------------
diff --git a/.travis.yml b/.travis.yml
index cd3d3e9..4dce250 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -21,6 +21,7 @@ install:
     libevent
     bison
     cpanm
+    maven
   - brew reinstall python
   - brew outdated libyaml || brew upgrade libyaml
   - brew outdated json-c || brew upgrade json-c
@@ -31,6 +32,7 @@ install:
   - sudo cpanm install JSON
 
 before_script:
+  - mvn apache-rat:check
   - cd $TRAVIS_BUILD_DIR
   - ./configure
 


[46/50] [abbrv] incubator-hawq git commit: HAWQ-1297. Fixed pxf classpath to work with custom hadoop distros

Posted by es...@apache.org.
HAWQ-1297. Fixed pxf classpath to work with custom hadoop distros


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/524e2e50
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/524e2e50
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/524e2e50

Branch: refs/heads/2.1.0.0-incubating
Commit: 524e2e501511b8fbd1c9d517e785b6f8d5ae883c
Parents: 6fa1ced
Author: Kavinder Dhaliwal <ka...@gmail.com>
Authored: Tue Jan 31 15:26:03 2017 -0800
Committer: Kavinder Dhaliwal <ka...@gmail.com>
Committed: Tue Jan 31 15:26:03 2017 -0800

----------------------------------------------------------------------
 pxf/build.gradle | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/524e2e50/pxf/build.gradle
----------------------------------------------------------------------
diff --git a/pxf/build.gradle b/pxf/build.gradle
index 3c6d591..f273469 100644
--- a/pxf/build.gradle
+++ b/pxf/build.gradle
@@ -232,6 +232,7 @@ project('pxf-service') {
         from("src/main/resources") {
             into("/etc/pxf-${project.version}/conf")
             include("**/pxf-private*.classpath")
+            exclude("**/pxf-private.classpath")
         }
 
         from("src/main/resources/pxf-private${hddist}.classpath") {


[09/50] [abbrv] incubator-hawq git commit: HAWQ-1249. Don't do ACL checks on segments

Posted by es...@apache.org.
HAWQ-1249. Don't do ACL checks on segments


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/2f5910f2
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/2f5910f2
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/2f5910f2

Branch: refs/heads/2.1.0.0-incubating
Commit: 2f5910f2b0c2877e524c4c428ed963255c176378
Parents: 8d22582
Author: Chunling Wang <wa...@126.com>
Authored: Mon Jan 9 14:35:11 2017 +0800
Committer: Chunling Wang <wa...@126.com>
Committed: Mon Jan 9 14:35:11 2017 +0800

----------------------------------------------------------------------
 src/backend/catalog/aclchk.c        | 85 +++++++++++++++++++++++---------
 src/backend/executor/execMain.c     | 37 +-------------
 src/backend/parser/parse_relation.c | 35 +++----------
 3 files changed, 72 insertions(+), 85 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/2f5910f2/src/backend/catalog/aclchk.c
----------------------------------------------------------------------
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index d19a045..01a4f94 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -224,8 +224,9 @@ restrict_and_check_grant(bool is_grant, AclMode avail_goptions, bool all_privs,
 	 * If we found no grant options, consider whether to issue a hard error.
 	 * Per spec, having any privilege at all on the object will get you by
 	 * here.
+	 * QE bypass all permission checking.
 	 */
-	if (avail_goptions == ACL_NO_RIGHTS)
+	if (avail_goptions == ACL_NO_RIGHTS && Gp_role != GP_ROLE_EXECUTE)
 	{
 	  if (enable_ranger && !fallBackToNativeCheck(objkind, objectId, grantorId)) {
 	    if (pg_rangercheck(objkind, objectId, grantorId,
@@ -2948,9 +2949,9 @@ pg_class_aclmask(Oid table_oid, Oid roleid,
 		}
 	}
 	/*
-	 * Otherwise, superusers or on QE bypass all permission-checking.
+	 * Otherwise, superusers bypass all permission-checking.
 	 */
-	if (GP_ROLE_EXECUTE == Gp_role || superuser_arg(roleid))
+	if (superuser_arg(roleid))
 	{
 #ifdef ACLDEBUG
 		elog(DEBUG2, "OID %u is superuser, home free", roleid);
@@ -3006,8 +3007,8 @@ pg_database_aclmask(Oid db_oid, Oid roleid,
 	Oid			ownerId;
 	cqContext  *pcqCtx;
 
-	/* Superusers or on QE bypass all permission checking. */
-	if (GP_ROLE_EXECUTE == Gp_role || superuser_arg(roleid))
+	/* Superusers bypass all permission checking. */
+	if (superuser_arg(roleid))
 		return mask;
 
 	/*
@@ -3069,8 +3070,8 @@ pg_proc_aclmask(Oid proc_oid, Oid roleid,
 	Oid			ownerId;
 	cqContext  *pcqCtx;
 
-	/* Superusers or on QE bypass all permission checking. */
-	if (GP_ROLE_EXECUTE == Gp_role || superuser_arg(roleid))
+	/* Superusers bypass all permission checking. */
+	if (superuser_arg(roleid))
 		return mask;
 
 	/*
@@ -3131,8 +3132,8 @@ pg_language_aclmask(Oid lang_oid, Oid roleid,
 	Oid			ownerId;
 	cqContext  *pcqCtx;
 
-	/* Superusers or on QE bypass all permission checking. */
-	if (GP_ROLE_EXECUTE == Gp_role || superuser_arg(roleid))
+	/* Superusers bypass all permission checking. */
+	if (superuser_arg(roleid))
 		return mask;
 
 	/*
@@ -3194,8 +3195,8 @@ pg_namespace_aclmask(Oid nsp_oid, Oid roleid,
 	Oid			ownerId;
 	cqContext  *pcqCtx;
 
-	/* Superusers or on QE bypass all permission checking. */
-	if (GP_ROLE_EXECUTE == Gp_role || superuser_arg(roleid))
+	/* Superusers bypass all permission checking. */
+	if (superuser_arg(roleid))
 		return mask;
 
 	/*
@@ -3293,8 +3294,8 @@ pg_tablespace_aclmask(Oid spc_oid, Oid roleid,
 	if (spc_oid == GLOBALTABLESPACE_OID && !(IsBootstrapProcessingMode()||gp_upgrade_mode))
 		return 0;
 
-	/* Otherwise, superusers or on QE bypass all permission checking. */
-	if (GP_ROLE_EXECUTE == Gp_role || superuser_arg(roleid))
+	/* Superusers bypass all permission checking. */
+	if (superuser_arg(roleid))
 		return mask;
 
 	/*
@@ -3366,8 +3367,8 @@ pg_foreign_data_wrapper_aclmask(Oid fdw_oid, Oid roleid,
 
 	Form_pg_foreign_data_wrapper fdwForm;
 
-	/* Bypass permission checks for superusers or on QE */
-	if (GP_ROLE_EXECUTE == Gp_role || superuser_arg(roleid))
+	/* Superusers bypass all permission checking. */
+	if (superuser_arg(roleid))
 		return mask;
 
 	/*
@@ -3435,8 +3436,8 @@ pg_foreign_server_aclmask(Oid srv_oid, Oid roleid,
 
 	Form_pg_foreign_server srvForm;
 
-	/* Bypass permission checks for superusers or on QE */
-	if (GP_ROLE_EXECUTE == Gp_role || superuser_arg(roleid))
+	/* Superusers bypass all permission checking. */
+	if (superuser_arg(roleid))
 		return mask;
 
 	/*
@@ -3505,10 +3506,10 @@ pg_extprotocol_aclmask(Oid ptcOid, Oid roleid,
 	cqContext	cqc;
 	cqContext  *pcqCtx;
 
-	/* Bypass permission checks for superusers or on QE */
-	if (GP_ROLE_EXECUTE == Gp_role || superuser_arg(roleid))
+	/* Superusers bypass all permission checking. */
+	if (superuser_arg(roleid))
 		return mask;
-	
+
 	rel = heap_open(ExtprotocolRelationId, AccessShareLock);
 
 	pcqCtx = caql_beginscan(
@@ -3585,8 +3586,8 @@ pg_filesystem_aclmask(Oid fsysOid, Oid roleid,
 	ScanKeyData entry[1];
 
 
-	/* Bypass permission checks for superusers or on QE */
-	if (GP_ROLE_EXECUTE == Gp_role || superuser_arg(roleid))
+	/* Bypass permission checks for superusers */
+	if (superuser_arg(roleid))
 		return mask;
 	
 	/*
@@ -3788,6 +3789,10 @@ pg_filesystem_nativecheck(Oid fsysid, Oid roleid, AclMode mode)
 AclResult
 pg_class_aclcheck(Oid table_oid, Oid roleid, AclMode mode)
 {
+  /* Bypass all permission checking on QE. */
+  if (Gp_role == GP_ROLE_EXECUTE)
+    return ACLCHECK_OK;
+
   if(enable_ranger && !fallBackToNativeCheck(ACL_KIND_CLASS, table_oid, roleid))
   {
     return pg_rangercheck(ACL_KIND_CLASS, table_oid, roleid, mode, ACLMASK_ANY);
@@ -3804,6 +3809,10 @@ pg_class_aclcheck(Oid table_oid, Oid roleid, AclMode mode)
 AclResult
 pg_database_aclcheck(Oid db_oid, Oid roleid, AclMode mode)
 {
+  /* Bypass all permission checking on QE. */
+  if (Gp_role == GP_ROLE_EXECUTE)
+    return ACLCHECK_OK;
+
   if(enable_ranger && !fallBackToNativeCheck(ACL_KIND_DATABASE, db_oid, roleid))
    {
      return pg_rangercheck(ACL_KIND_DATABASE, db_oid, roleid, mode, ACLMASK_ANY);
@@ -3820,6 +3829,10 @@ pg_database_aclcheck(Oid db_oid, Oid roleid, AclMode mode)
 AclResult
 pg_proc_aclcheck(Oid proc_oid, Oid roleid, AclMode mode)
 {
+  /* Bypass all permission checking on QE. */
+  if (Gp_role == GP_ROLE_EXECUTE)
+    return ACLCHECK_OK;
+
   if(enable_ranger && !fallBackToNativeCheck(ACL_KIND_PROC, proc_oid, roleid))
   {
     return pg_rangercheck(ACL_KIND_PROC, proc_oid, roleid, mode, ACLMASK_ANY);
@@ -3836,6 +3849,10 @@ pg_proc_aclcheck(Oid proc_oid, Oid roleid, AclMode mode)
 AclResult
 pg_language_aclcheck(Oid lang_oid, Oid roleid, AclMode mode)
 {
+  /* Bypass all permission checking on QE. */
+  if (Gp_role == GP_ROLE_EXECUTE)
+    return ACLCHECK_OK;
+
   if(enable_ranger && !fallBackToNativeCheck(ACL_KIND_LANGUAGE, lang_oid, roleid))
   {
     return pg_rangercheck(ACL_KIND_LANGUAGE, lang_oid, roleid, mode, ACLMASK_ANY);
@@ -3852,6 +3869,10 @@ pg_language_aclcheck(Oid lang_oid, Oid roleid, AclMode mode)
 AclResult
 pg_namespace_aclcheck(Oid nsp_oid, Oid roleid, AclMode mode)
 {
+  /* Bypass all permission checking on QE. */
+  if (Gp_role == GP_ROLE_EXECUTE)
+    return ACLCHECK_OK;
+
   if(enable_ranger && !fallBackToNativeCheck(ACL_KIND_NAMESPACE, nsp_oid, roleid))
   {
     return pg_rangercheck(ACL_KIND_NAMESPACE, nsp_oid, roleid, mode, ACLMASK_ANY);
@@ -3868,6 +3889,10 @@ pg_namespace_aclcheck(Oid nsp_oid, Oid roleid, AclMode mode)
 AclResult
 pg_tablespace_aclcheck(Oid spc_oid, Oid roleid, AclMode mode)
 {
+  /* Bypass all permission checking on QE. */
+  if (Gp_role == GP_ROLE_EXECUTE)
+    return ACLCHECK_OK;
+
   if(enable_ranger && !fallBackToNativeCheck(ACL_KIND_TABLESPACE, spc_oid, roleid))
   {
     return pg_rangercheck(ACL_KIND_TABLESPACE, spc_oid, roleid, mode, ACLMASK_ANY);
@@ -3885,6 +3910,10 @@ pg_tablespace_aclcheck(Oid spc_oid, Oid roleid, AclMode mode)
 AclResult
 pg_foreign_data_wrapper_aclcheck(Oid fdw_oid, Oid roleid, AclMode mode)
 {
+  /* Bypass all permission checking on QE. */
+  if (Gp_role == GP_ROLE_EXECUTE)
+    return ACLCHECK_OK;
+
   if(enable_ranger && !fallBackToNativeCheck(ACL_KIND_FDW, fdw_oid, roleid))
   {
     return pg_rangercheck(ACL_KIND_FDW, fdw_oid, roleid, mode, ACLMASK_ANY);
@@ -3902,6 +3931,10 @@ pg_foreign_data_wrapper_aclcheck(Oid fdw_oid, Oid roleid, AclMode mode)
 AclResult
 pg_foreign_server_aclcheck(Oid srv_oid, Oid roleid, AclMode mode)
 {
+  /* Bypass all permission checking on QE. */
+  if (Gp_role == GP_ROLE_EXECUTE)
+    return ACLCHECK_OK;
+
   if(enable_ranger && !fallBackToNativeCheck(ACL_KIND_FOREIGN_SERVER, srv_oid, roleid))
   {
     return pg_rangercheck(ACL_KIND_FOREIGN_SERVER, srv_oid, roleid, mode, ACLMASK_ANY);
@@ -3919,6 +3952,10 @@ pg_foreign_server_aclcheck(Oid srv_oid, Oid roleid, AclMode mode)
 AclResult
 pg_extprotocol_aclcheck(Oid ptcid, Oid roleid, AclMode mode)
 {
+  /* Bypass all permission checking on QE. */
+  if (Gp_role == GP_ROLE_EXECUTE)
+    return ACLCHECK_OK;
+
   if(enable_ranger && !fallBackToNativeCheck(ACL_KIND_EXTPROTOCOL, ptcid, roleid))
   {
     return pg_rangercheck(ACL_KIND_EXTPROTOCOL, ptcid, roleid, mode, ACLMASK_ANY);
@@ -3935,6 +3972,10 @@ pg_extprotocol_aclcheck(Oid ptcid, Oid roleid, AclMode mode)
 AclResult
 pg_filesystem_aclcheck(Oid fsysid, Oid roleid, AclMode mode)
 {
+  /* Bypass all permission checking on QE. */
+  if (Gp_role == GP_ROLE_EXECUTE)
+    return ACLCHECK_OK;
+
   if(enable_ranger && !fallBackToNativeCheck(ACL_KIND_FILESYSTEM, fsysid, roleid))
   {
     return pg_rangercheck(ACL_KIND_FILESYSTEM, fsysid, roleid, mode, ACLMASK_ANY);
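
Each pg_*_aclcheck() above now opens with the same three-line guard; written once, the pattern is simply the sketch below (it relies on the Gp_role and ACLCHECK_OK symbols from the surrounding HAWQ headers and is not a macro introduced by this commit):

    /* On a segment (QE) skip ACL re-checks entirely; the dispatcher (QD)
     * has already performed the access check before dispatching the plan. */
    #define BYPASS_ACLCHECK_ON_QE()          \
        do {                                 \
            if (Gp_role == GP_ROLE_EXECUTE)  \
                return ACLCHECK_OK;          \
        } while (0)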

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/2f5910f2/src/backend/executor/execMain.c
----------------------------------------------------------------------
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 30f6d09..666d16f 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -1912,45 +1912,10 @@ InitPlan(QueryDesc *queryDesc, int eflags)
 	 * rangetable here --- subplan RTEs will be checked during
 	 * ExecInitSubPlan().
 	 */
-	if (operation != CMD_SELECT ||
-			(Gp_role != GP_ROLE_EXECUTE &&
-			 !(shouldDispatch && cdbpathlocus_querysegmentcatalogs)))
+	if (Gp_role != GP_ROLE_EXECUTE)
 	{
 		ExecCheckRTPerms(plannedstmt->rtable);
 	}
-	else
-	{
-		/*
-		 * We don't check the rights here, so we can query pg_statistic even if we are a non-privileged user.
-		 * This shouldn't cause a problem, because "cdbpathlocus_querysegmentcatalogs" can only be true if we
-		 * are doing special catalog queries for ANALYZE.  Otherwise, the QD will execute the normal access right
-		 * check.  This does open a security hole, as it's possible for a hacker to connect to a segdb with GP_ROLE_EXECUTE,
-		 * (at least, in theory, although it isn't easy) and then do a query.  But all they can see is
-		 * pg_statistic and pg_class, and pg_class is normally readable by everyone.
-		 */
-
-		ListCell *lc = NULL;
-
-		foreach(lc, plannedstmt->rtable)
-		{
-			RangeTblEntry *rte = lfirst(lc);
-
-			if (rte->rtekind != RTE_RELATION)
-				continue;
-
-			if (rte->requiredPerms == 0)
-				continue;
-
-			/*
-			 * Ignore access rights check on pg_statistic and pg_class, so
-			 * the QD can retreive the statistics from the QEs.
-			 */
-			if (rte->relid != StatisticRelationId && rte->relid != RelationRelationId)
-			{
-				ExecCheckRTEPerms(rte);
-			}
-		}
-	}
 
 	/*
 	 * get information from query descriptor

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/2f5910f2/src/backend/parser/parse_relation.c
----------------------------------------------------------------------
diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c
index 7dbe496..f9444ef 100644
--- a/src/backend/parser/parse_relation.c
+++ b/src/backend/parser/parse_relation.c
@@ -2841,33 +2841,14 @@ ExecCheckRTEPerms(RangeTblEntry *rte)
 	/*
 	 * We must have *all* the requiredPerms bits, so use aclmask not aclcheck.
 	 */
-	if (enable_ranger && !fallBackToNativeCheck(ACL_KIND_CLASS, relOid, userid))
-	{
-	  elog(LOG, "ExecCheckRTEPerms: here");
-	  /* ranger check required permission should all be approved.*/
-    if (pg_rangercheck(ACL_KIND_CLASS, relOid, userid, requiredPerms, ACLMASK_ALL)
-        != RANGERCHECK_OK)
-    {
-      /*
-       * If the table is a partition, return an error message that includes
-       * the name of the parent table.
-       */
-      const char *rel_name = get_rel_name_partition(relOid);
-      aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS, rel_name);
-    }
-	}
-	else
-	{
-	  if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
-	        != requiredPerms)
-    {
-      /*
-       * If the table is a partition, return an error message that includes
-       * the name of the parent table.
-       */
-      const char *rel_name = get_rel_name_partition(relOid);
-      aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS, rel_name);
-    }
+	if (pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL)
+			!= requiredPerms) {
+		/*
+		 * If the table is a partition, return an error message that includes
+		 * the name of the parent table.
+		 */
+		const char *rel_name = get_rel_name_partition(relOid);
+		aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS, rel_name);
 	}
 }
 


[29/50] [abbrv] incubator-hawq git commit: HAWQ-1203. Ranger Plugin Service Implementation. (with contributions by Lav Jain and Leslie Chang) (close #1092)

Posted by es...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/RangerHawqAuthorizerTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/RangerHawqAuthorizerTest.java b/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/RangerHawqAuthorizerTest.java
new file mode 100644
index 0000000..0a439db
--- /dev/null
+++ b/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/RangerHawqAuthorizerTest.java
@@ -0,0 +1,325 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hawq.ranger.authorization;
+
+import org.apache.hawq.ranger.authorization.model.AuthorizationRequest;
+import org.apache.hawq.ranger.authorization.model.AuthorizationResponse;
+import org.apache.hawq.ranger.authorization.model.HawqPrivilege;
+import org.apache.hawq.ranger.authorization.model.HawqResource;
+import org.apache.hawq.ranger.authorization.model.ResourceAccess;
+import org.apache.ranger.plugin.policyengine.RangerAccessRequest;
+import org.apache.ranger.plugin.policyengine.RangerAccessResult;
+import org.apache.ranger.plugin.service.RangerBasePlugin;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.ArgumentMatcher;
+import org.mockito.Mock;
+import org.mockito.internal.util.collections.Sets;
+import org.mockito.runners.MockitoJUnitRunner;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import static junit.framework.TestCase.assertEquals;
+import static junit.framework.TestCase.assertNotNull;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.argThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+
+@RunWith(MockitoJUnitRunner.class)
+public class RangerHawqAuthorizerTest {
+
+    private static final Integer TEST_REQUEST_ID = 1;
+    private static final String TEST_USER = "alex";
+    private static final String TEST_CLIENT = "1.2.3.4";
+    private static final String TEST_CONTEXT = "SELECT * FROM sales";
+    private static final Set<HawqPrivilege> TEST_PRIVILEGES = Sets.newSet(HawqPrivilege.select, HawqPrivilege.update);
+
+    private static final String TEST_RESOURCE_REQUEST =
+            "finance:us:sales>select,update#finance:emea:sales>create";
+    private static final String TEST_RESOURCE_RESPONSE_ALL_FALSE =
+            "finance:us:sales>select,update>false#finance:emea:sales>create>false";
+    private static final String TEST_RESOURCE_RESPONSE_ALL_TRUE =
+            "finance:us:sales>select,update>true#finance:emea:sales>create>true";
+    private static final String TEST_RESOURCE_RESPONSE_US_ALLOWED_EMEA_DENIED =
+            "finance:us:sales>select,update>true#finance:emea:sales>create>false";
+    private static final String TEST_RESOURCE_RESPONSE_UPDATE_DENIED =
+            "finance:us:sales>select,update>false#finance:emea:sales>create>true";
+
+    private static final String TEST_RESOURCE_REQUEST_CREATE_SCHEMA  = "finance>create";
+    private static final String TEST_RESOURCE_RESPONSE_CREATE_SCHEMA = "finance>create>true";
+    private static final String TEST_RESOURCE_REQUEST_USAGE_SCHEMA  = "finance:us>usage";
+    private static final String TEST_RESOURCE_RESPONSE_USAGE_SCHEMA = "finance:us>usage>true";
+
+    private RangerHawqAuthorizer authorizer;
+
+    @Mock
+    private RangerBasePlugin mockRangerPlugin;
+    @Mock
+    private RangerAccessResult mockRangerAccessResult;
+
+    @Before
+    public void setup() throws Exception {
+        authorizer = RangerHawqAuthorizer.getInstance();
+        authorizer.setRangerPlugin(mockRangerPlugin);
+    }
+
+    @Test
+    public void testAuthorize_allAllowed() throws Exception {
+        when(mockRangerPlugin.isAccessAllowed(any(RangerAccessRequest.class))).thenReturn(mockRangerAccessResult);
+        when(mockRangerAccessResult.getIsAllowed()).thenReturn(true);
+        testRequest(TEST_RESOURCE_REQUEST, TEST_RESOURCE_RESPONSE_ALL_TRUE);
+    }
+
+    @Test
+    public void testAuthorize_allDenied() throws Exception {
+        when(mockRangerPlugin.isAccessAllowed(any(RangerAccessRequest.class))).thenReturn(mockRangerAccessResult);
+        when(mockRangerAccessResult.getIsAllowed()).thenReturn(false);
+        testRequest(TEST_RESOURCE_REQUEST, TEST_RESOURCE_RESPONSE_ALL_FALSE);
+    }
+
+    @Test
+    public void testAuthorize_usAllowedEmeaDenied() throws Exception {
+        RangerAccessResult mockRangerAccessResultUS = mock(RangerAccessResult.class);
+        RangerAccessResult mockRangerAccessResultEMEA = mock(RangerAccessResult.class);
+
+        when(mockRangerPlugin.isAccessAllowed(argThat(new SchemaMatcher("us")))).thenReturn(mockRangerAccessResultUS);
+        when(mockRangerPlugin.isAccessAllowed(argThat(new SchemaMatcher("emea")))).thenReturn(mockRangerAccessResultEMEA);
+        when(mockRangerAccessResultUS.getIsAllowed()).thenReturn(true);
+        when(mockRangerAccessResultEMEA.getIsAllowed()).thenReturn(false);
+        testRequest(TEST_RESOURCE_REQUEST, TEST_RESOURCE_RESPONSE_US_ALLOWED_EMEA_DENIED);
+    }
+
+    @Test
+    public void testAuthorize_partialPrivilegeUpdateDenied() throws Exception {
+        RangerAccessResult mockRangerAccessResultCreateSelect = mock(RangerAccessResult.class);
+        RangerAccessResult mockRangerAccessResultUpdate = mock(RangerAccessResult.class);
+
+        when(mockRangerPlugin.isAccessAllowed(argThat(new PrivilegeMatcher("create", "select")))).thenReturn(mockRangerAccessResultCreateSelect);
+        when(mockRangerPlugin.isAccessAllowed(argThat(new PrivilegeMatcher("update")))).thenReturn(mockRangerAccessResultUpdate);
+        when(mockRangerAccessResultCreateSelect.getIsAllowed()).thenReturn(true);
+        when(mockRangerAccessResultUpdate.getIsAllowed()).thenReturn(false);
+        testRequest(TEST_RESOURCE_REQUEST, TEST_RESOURCE_RESPONSE_UPDATE_DENIED);
+    }
+
+    @Test
+    public void testAuthorize_createSchemaAllowed() throws Exception {
+        RangerAccessResult mockRangerAccessResultCreate = mock(RangerAccessResult.class);
+
+        when(mockRangerPlugin.isAccessAllowed(argThat(new PrivilegeMatcher("create-schema")))).thenReturn(mockRangerAccessResultCreate);
+        when(mockRangerAccessResultCreate.getIsAllowed()).thenReturn(true);
+        testRequest(TEST_RESOURCE_REQUEST_CREATE_SCHEMA, TEST_RESOURCE_RESPONSE_CREATE_SCHEMA);
+    }
+
+    @Test
+    public void testAuthorize_usageSchemaAllowed() throws Exception {
+        RangerAccessResult mockRangerAccessResultUsage = mock(RangerAccessResult.class);
+
+        when(mockRangerPlugin.isAccessAllowed(argThat(new PrivilegeMatcher("usage-schema")))).thenReturn(mockRangerAccessResultUsage);
+        when(mockRangerAccessResultUsage.getIsAllowed()).thenReturn(true);
+        testRequest(TEST_RESOURCE_REQUEST_USAGE_SCHEMA, TEST_RESOURCE_RESPONSE_USAGE_SCHEMA);
+    }
+
+    /* ----- VALIDATION TESTS ----- */
+
+    @Test(expected=IllegalArgumentException.class)
+    public void testAuthorize_validationFailure_requestId() {
+        AuthorizationRequest request = prepareRequest(null, TEST_USER, TEST_CLIENT, TEST_CONTEXT, TEST_RESOURCE_REQUEST);
+        authorizer.isAccessAllowed(request);
+    }
+    @Test(expected=IllegalArgumentException.class)
+    public void testAuthorize_validationFailure_user() {
+        AuthorizationRequest request = prepareRequest(TEST_REQUEST_ID, "", TEST_CLIENT, TEST_CONTEXT, TEST_RESOURCE_REQUEST);
+        authorizer.isAccessAllowed(request);
+    }
+    @Test(expected=IllegalArgumentException.class)
+    public void testAuthorize_validationFailure_client() {
+        AuthorizationRequest request = prepareRequest(TEST_REQUEST_ID, TEST_USER, "", TEST_CONTEXT, TEST_RESOURCE_REQUEST);
+        authorizer.isAccessAllowed(request);
+    }
+    @Test(expected=IllegalArgumentException.class)
+    public void testAuthorize_validationFailure_context() {
+        AuthorizationRequest request = prepareRequest(TEST_REQUEST_ID, TEST_USER, TEST_CLIENT, "", TEST_RESOURCE_REQUEST);
+        authorizer.isAccessAllowed(request);
+    }
+    @Test(expected=IllegalArgumentException.class)
+    public void testAuthorize_validationFailure_emptyAccessSet() {
+        AuthorizationRequest request = prepareRequest(TEST_REQUEST_ID, TEST_USER, TEST_CLIENT, TEST_CONTEXT, new HashSet<ResourceAccess>());
+        authorizer.isAccessAllowed(request);
+    }
+    @Test(expected=IllegalArgumentException.class)
+    public void testAuthorize_validationFailure_emptyResource() {
+        ResourceAccess resourceAccess = new ResourceAccess();
+        resourceAccess.setResource(new HashMap<HawqResource, String>());
+        resourceAccess.setPrivileges(TEST_PRIVILEGES);
+        AuthorizationRequest request = prepareRequest(TEST_REQUEST_ID, TEST_USER, TEST_CLIENT, TEST_CONTEXT, resourceAccess);
+        authorizer.isAccessAllowed(request);
+    }
+    @Test(expected=IllegalArgumentException.class)
+    public void testAuthorize_validationFailure_emptyResourceValue() {
+        ResourceAccess resourceAccess = new ResourceAccess();
+        HashMap<HawqResource, String> resource = new HashMap<>();
+        resource.put(HawqResource.database, "");
+        resourceAccess.setResource(resource);
+        resourceAccess.setPrivileges(TEST_PRIVILEGES);
+        AuthorizationRequest request = prepareRequest(TEST_REQUEST_ID, TEST_USER, TEST_CLIENT, TEST_CONTEXT, resourceAccess);
+        authorizer.isAccessAllowed(request);
+    }
+    @Test(expected=IllegalArgumentException.class)
+    public void testAuthorize_validationFailure_emptyPrivileges() {
+        ResourceAccess resourceAccess = new ResourceAccess();
+        HashMap<HawqResource, String> resource = new HashMap<>();
+        resource.put(HawqResource.database, "abc");
+        resourceAccess.setResource(resource);
+        resourceAccess.setPrivileges(new HashSet<HawqPrivilege>());
+        AuthorizationRequest request = prepareRequest(TEST_REQUEST_ID, TEST_USER, TEST_CLIENT, TEST_CONTEXT, resourceAccess);
+        authorizer.isAccessAllowed(request);
+    }
+
+    /* ----- HELPER METHODS ----- */
+
+    private void testRequest(String request, String expectedResponse) {
+        AuthorizationRequest authRequest = prepareRequest(TEST_REQUEST_ID, TEST_USER, TEST_CLIENT, TEST_CONTEXT, request);
+        AuthorizationResponse authResponse = authorizer.isAccessAllowed(authRequest);
+        validateResponse(authResponse, expectedResponse);
+    }
+
+    private AuthorizationRequest prepareRequest(
+            Integer requestId, String user, String clientIp, String context, Set<ResourceAccess> access) {
+
+        AuthorizationRequest request = new AuthorizationRequest();
+        request.setRequestId(requestId);
+        request.setUser(user);
+        request.setClientIp(clientIp);
+        request.setContext(context);
+        request.setAccess(access);
+
+        return request;
+    }
+
+    private AuthorizationRequest prepareRequest(
+            Integer requestId, String user, String clientIp, String context, ResourceAccess resourceAccess) {
+
+        Set<ResourceAccess> access = new HashSet<>();
+        access.add(resourceAccess);
+        return prepareRequest(requestId, user, clientIp, context, access);
+    }
+
+    private AuthorizationRequest prepareRequest(
+            Integer requestId, String user, String clientIp, String context, String resources) {
+
+        Set<ResourceAccess> access = new HashSet<>();
+        // resource string is like "db:schema:table>select,update#db:schema:table>create"
+        for (String resourceStr : resources.split("#")) {
+            String[] parts = resourceStr.split(">");
+            String[] resource = parts[0].split(":");
+            String[] privs = parts[1].split(",");
+
+            Map<HawqResource, String> tableResource = new HashMap<>();
+            tableResource.put(HawqResource.database, resource[0]);
+            if (resource.length > 1) {
+                tableResource.put(HawqResource.schema, resource[1]);
+            }
+            if (resource.length > 2) {
+                tableResource.put(HawqResource.table, resource[2]);
+            }
+            ResourceAccess tableAccess = new ResourceAccess();
+            tableAccess.setResource(tableResource);
+
+            Set<HawqPrivilege> privSet = new HashSet<>();
+            for (String priv : privs) {
+                privSet.add(HawqPrivilege.valueOf(priv));
+            }
+            tableAccess.setPrivileges(privSet);
+            access.add(tableAccess);
+        }
+
+        return prepareRequest(requestId, user, clientIp, context, access);
+    }
+
+    private void validateResponse(AuthorizationResponse response, String resources) {
+
+        assertNotNull(response);
+
+        Set<ResourceAccess> actual = response.getAccess();
+        Set<ResourceAccess> expected = new HashSet<>();
+
+        // resources string is like "db:schema:table>select,update>true#db:schema:table>create>false"
+        for (String resourceStr : resources.split("#")) {
+            String[] parts = resourceStr.split(">");
+            String[] resource = parts[0].split(":");
+            String[] privs = parts[1].split(",");
+            Boolean allowed = Boolean.valueOf(parts[2]);
+
+            Map<HawqResource, String> tableResource = new HashMap<>();
+            tableResource.put(HawqResource.database, resource[0]);
+            if (resource.length > 1) {
+                tableResource.put(HawqResource.schema, resource[1]);
+            }
+            if (resource.length > 2) {
+                tableResource.put(HawqResource.table, resource[2]);
+            }
+            ResourceAccess tableAccess = new ResourceAccess();
+            tableAccess.setResource(tableResource);
+
+            Set<HawqPrivilege> privSet = new HashSet<>();
+            for (String priv : privs) {
+                privSet.add(HawqPrivilege.fromString(priv));
+            }
+            tableAccess.setPrivileges(privSet);
+            tableAccess.setAllowed(allowed);
+
+            expected.add(tableAccess);
+        }
+
+        assertEquals(expected.size(), actual.size());
+        assertEquals(expected, actual);
+    }
+
+    /* ----- Argument Matchers ----- */
+
+    private class SchemaMatcher extends ArgumentMatcher<RangerAccessRequest> {
+        private String schema;
+        public SchemaMatcher(String schema) {
+            this.schema = schema;
+        }
+        @Override
+        public boolean matches(Object request) {
+            return request == null ? false :
+                    schema.equals(((RangerAccessRequest) request).getResource().getAsMap().get("schema"));
+        }
+    };
+
+    private class PrivilegeMatcher extends ArgumentMatcher<RangerAccessRequest> {
+        private Set<String> privileges;
+        public PrivilegeMatcher(String... privileges) {
+            this.privileges = Sets.newSet(privileges);
+        }
+        @Override
+        public boolean matches(Object request) {
+            return request == null ? false :
+                    privileges.contains(((RangerAccessRequest) request).getAccessType());
+        }
+    };
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/RangerHawqPluginResourceTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/RangerHawqPluginResourceTest.java b/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/RangerHawqPluginResourceTest.java
new file mode 100644
index 0000000..40c2217
--- /dev/null
+++ b/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/RangerHawqPluginResourceTest.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hawq.ranger.authorization;
+
+import org.apache.hawq.ranger.authorization.model.AuthorizationRequest;
+import org.apache.hawq.ranger.authorization.model.AuthorizationResponse;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.powermock.api.mockito.PowerMockito;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
+
+import static junit.framework.TestCase.*;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.when;
+
+@RunWith(PowerMockRunner.class)
+@PrepareForTest(RangerHawqAuthorizer.class)
+public class RangerHawqPluginResourceTest {
+
+    private RangerHawqPluginResource resource;
+
+    @Mock
+    private RangerHawqAuthorizer mockAuthorizer;
+    @Mock
+    private AuthorizationResponse mockResponse;
+    @Mock
+    private RuntimeException mockException;
+
+    @Before
+    public void setup() throws Exception {
+        PowerMockito.mockStatic(RangerHawqAuthorizer.class);
+        when(RangerHawqAuthorizer.getInstance()).thenReturn(mockAuthorizer);
+        resource = new RangerHawqPluginResource();
+    }
+
+    @Test
+    public void testGetVersion() {
+        String version = (String) resource.version().getEntity();
+        assertEquals("{\"version\":\"version-test\"}", version);
+    }
+
+    @Test
+    public void testAuthorizeSuccess() {
+        when(mockAuthorizer.isAccessAllowed(any(AuthorizationRequest.class))).thenReturn(mockResponse);
+        AuthorizationResponse response = resource.authorize(new AuthorizationRequest());
+        assertNotNull(response);
+        assertEquals(mockResponse, response);
+    }
+
+    @Test
+    public void testAuthorizeException() {
+        when(mockAuthorizer.isAccessAllowed(any(AuthorizationRequest.class))).thenThrow(mockException);
+        try {
+            resource.authorize(new AuthorizationRequest());
+            fail("should've thrown exception");
+        } catch (Exception e) {
+            assertSame(mockException, e);
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/ServiceExceptionMapperTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/ServiceExceptionMapperTest.java b/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/ServiceExceptionMapperTest.java
new file mode 100644
index 0000000..e81b76c
--- /dev/null
+++ b/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/ServiceExceptionMapperTest.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hawq.ranger.authorization;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.runners.MockitoJUnitRunner;
+
+import javax.ws.rs.core.Response;
+
+import static org.junit.Assert.assertEquals;
+
+@RunWith(MockitoJUnitRunner.class)
+public class ServiceExceptionMapperTest {
+
+    private ServiceExceptionMapper mapper;
+
+    @Before
+    public void setup() {
+        mapper = new ServiceExceptionMapper();
+    }
+
+    @Test
+    public void testIllegalArgumentException() {
+
+        Response response = mapper.toResponse(new IllegalArgumentException("reason"));
+        ServiceExceptionMapper.ErrorPayload entity = (ServiceExceptionMapper.ErrorPayload) response.getEntity();
+
+        assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus());
+        assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), entity.getStatus());
+        assertEquals("reason", entity.getMessage());
+    }
+
+    @Test
+    public void testOtherException() {
+
+        Response response = mapper.toResponse(new Exception("reason"));
+        ServiceExceptionMapper.ErrorPayload entity = (ServiceExceptionMapper.ErrorPayload) response.getEntity();
+
+        assertEquals(Response.Status.INTERNAL_SERVER_ERROR.getStatusCode(), response.getStatus());
+        assertEquals(Response.Status.INTERNAL_SERVER_ERROR.getStatusCode(), entity.getStatus());
+        assertEquals("reason", entity.getMessage());
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/UtilsTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/UtilsTest.java b/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/UtilsTest.java
new file mode 100644
index 0000000..bf62785
--- /dev/null
+++ b/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/UtilsTest.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.authorization;
+
+import org.junit.Test;
+
+import static org.apache.hawq.ranger.authorization.Utils.APP_ID_PROPERTY;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * This test class uses values from rps.properties file in test/resources directory.
+ */
+public class UtilsTest {
+
+    @Test
+    public void testCustomAppId_SystemEnv() throws Exception {
+        System.setProperty(APP_ID_PROPERTY, "app-id");
+        assertEquals("app-id", Utils.getAppId());
+        System.clearProperty(APP_ID_PROPERTY);
+    }
+
+    @Test
+    public void testCustomAppId_PropertyFile() throws Exception {
+        assertEquals("instance-test", Utils.getAppId());
+    }
+
+    @Test
+    public void testGetVersion() throws Exception {
+        assertEquals("version-test", Utils.getVersion());
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/model/HawqPrivilegeTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/model/HawqPrivilegeTest.java b/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/model/HawqPrivilegeTest.java
new file mode 100644
index 0000000..39dd3cc
--- /dev/null
+++ b/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/model/HawqPrivilegeTest.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.authorization.model;
+
+import org.codehaus.jackson.map.ObjectMapper;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
+
+public class HawqPrivilegeTest {
+
+    @Test
+    public void testSerialization() throws IOException {
+        assertEquals("create", HawqPrivilege.create.toValue());
+        assertEquals("create-schema", HawqPrivilege.create_schema.toValue());
+        assertEquals("usage-schema", HawqPrivilege.usage_schema.toValue());
+
+        ObjectMapper mapper = new ObjectMapper();
+        assertEquals("{\"value\":\"create\"}", mapper.writeValueAsString(new PrivilegeHolder(HawqPrivilege.create)));
+        assertEquals("{\"value\":\"create-schema\"}", mapper.writeValueAsString(new PrivilegeHolder(HawqPrivilege.create_schema)));
+        assertEquals("{\"value\":\"usage-schema\"}", mapper.writeValueAsString(new PrivilegeHolder(HawqPrivilege.usage_schema)));
+    }
+
+    @Test
+    public void testDeserialization() throws IOException {
+        assertNull(HawqPrivilege.fromString(null));
+        assertSame(HawqPrivilege.create, HawqPrivilege.fromString("create"));
+        assertSame(HawqPrivilege.create, HawqPrivilege.fromString("CREATE"));
+        assertSame(HawqPrivilege.create, HawqPrivilege.fromString("CreATe"));
+        assertSame(HawqPrivilege.create_schema, HawqPrivilege.fromString("CreATe-schema"));
+        assertSame(HawqPrivilege.usage_schema, HawqPrivilege.fromString("USage-schema"));
+
+
+        ObjectMapper mapper = new ObjectMapper();
+        assertSame(HawqPrivilege.create, mapper.readValue("{\"value\": \"create\"}", PrivilegeHolder.class).value);
+        assertSame(HawqPrivilege.create, mapper.readValue("{\"value\": \"CREATE\"}", PrivilegeHolder.class).value);
+        assertSame(HawqPrivilege.create, mapper.readValue("{\"value\": \"creATe\"}", PrivilegeHolder.class).value);
+        assertSame(HawqPrivilege.create_schema, mapper.readValue("{\"value\": \"CreATe-schema\"}", PrivilegeHolder.class).value);
+        assertSame(HawqPrivilege.usage_schema, mapper.readValue("{\"value\": \"USage-schema\"}", PrivilegeHolder.class).value);
+    }
+
+    public static class PrivilegeHolder {
+        public HawqPrivilege value;
+        PrivilegeHolder () {
+        }
+        PrivilegeHolder(HawqPrivilege value) {
+            this.value = value;
+        }
+    }
+}

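The assertions above pin down a naming convention rather than a particular implementation: enum constants use underscores, the serialized form uses hyphens, and parsing is case-insensitive. A minimal sketch of just that mapping follows (the Jackson wiring needed for the ObjectMapper round-trips is left out); the HawqResourceTest that comes next exercises the same case-insensitive pattern for resources.

// Sketch only: the underscore/hyphen and case-insensitivity rules implied by
// the tests above. The real HawqPrivilege enum may be wired differently.
public enum PrivilegeSketch {
    create, create_schema, usage_schema;

    // "create_schema" -> "create-schema"
    public String toValue() {
        return name().replace('_', '-');
    }

    // "CreATe-schema" -> create_schema; null stays null
    public static PrivilegeSketch fromString(String value) {
        return value == null ? null
                : valueOf(value.toLowerCase().replace('-', '_'));
    }
}
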
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/model/HawqResourceTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/model/HawqResourceTest.java b/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/model/HawqResourceTest.java
new file mode 100644
index 0000000..f59a600
--- /dev/null
+++ b/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/model/HawqResourceTest.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.authorization.model;
+
+import org.codehaus.jackson.map.ObjectMapper;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
+
+public class HawqResourceTest {
+
+    @Test
+    public void testCaseInsensitiveDeserialization() throws IOException {
+        assertNull(HawqResource.fromString(null));
+        assertSame(HawqResource.database, HawqResource.fromString("database"));
+        assertSame(HawqResource.database, HawqResource.fromString("DATABASE"));
+        assertSame(HawqResource.database, HawqResource.fromString("datABAse"));
+
+        ObjectMapper mapper = new ObjectMapper();
+        assertSame(HawqResource.database, mapper.readValue("{\"value\": \"database\"}", ResourceHolder.class).value);
+        assertSame(HawqResource.database, mapper.readValue("{\"value\": \"DATABASE\"}", ResourceHolder.class).value);
+        assertSame(HawqResource.database, mapper.readValue("{\"value\": \"datABAse\"}", ResourceHolder.class).value);
+    }
+
+    public static class ResourceHolder {
+        public HawqResource value;
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/service/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/ranger-plugin/service/src/test/resources/log4j.properties b/ranger-plugin/service/src/test/resources/log4j.properties
new file mode 100644
index 0000000..b9888df
--- /dev/null
+++ b/ranger-plugin/service/src/test/resources/log4j.properties
@@ -0,0 +1,42 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# see debug messages during unit tests
+#project.root.logger=DEBUG,console
+
+# suppress all logging output during unit tests
+project.root.logger=FATAL,devnull
+
+#
+# Loggers
+#
+log4j.rootLogger=${project.root.logger}
+
+# ignore most errors from the Apache Ranger and Hadoop for unit tests
+log4j.logger.org.apache.ranger=FATAL
+log4j.logger.org.apache.hadoop=FATAL
+
+#
+# Appenders
+#
+
+# nothing
+log4j.appender.devnull=org.apache.log4j.varia.NullAppender
+
+# console
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/service/src/test/resources/rps.properties
----------------------------------------------------------------------
diff --git a/ranger-plugin/service/src/test/resources/rps.properties b/ranger-plugin/service/src/test/resources/rps.properties
new file mode 100644
index 0000000..1fd50e5
--- /dev/null
+++ b/ranger-plugin/service/src/test/resources/rps.properties
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ranger.hawq.instance=instance-test
+version=version-test
\ No newline at end of file


[13/50] [abbrv] incubator-hawq git commit: HAWQ-1215. Support Complextypes with HiveORC

Posted by es...@apache.org.
HAWQ-1215. Support Complextypes with HiveORC


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/3b15739a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/3b15739a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/3b15739a

Branch: refs/heads/2.1.0.0-incubating
Commit: 3b15739a009601049f7343131abe889b204b4d62
Parents: aa5792d
Author: Kavinder Dhaliwal <ka...@gmail.com>
Authored: Fri Dec 23 16:27:58 2016 -0800
Committer: Kavinder Dhaliwal <ka...@gmail.com>
Committed: Wed Jan 11 14:14:36 2017 -0800

----------------------------------------------------------------------
 .../plugins/hive/HiveInputFormatFragmenter.java |  5 +--
 .../pxf/plugins/hive/HiveORCSerdeResolver.java  | 32 ++++++++++++++++++++
 .../plugins/hive/utilities/HiveUtilities.java   |  3 +-
 3 files changed, 37 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/3b15739a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveInputFormatFragmenter.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveInputFormatFragmenter.java b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveInputFormatFragmenter.java
index 051a246..ca4501b 100644
--- a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveInputFormatFragmenter.java
+++ b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveInputFormatFragmenter.java
@@ -26,7 +26,6 @@ import org.apache.hawq.pxf.api.UserDataException;
 import org.apache.hawq.pxf.api.io.DataType;
 import org.apache.hawq.pxf.api.utilities.ColumnDescriptor;
 import org.apache.hawq.pxf.api.utilities.InputData;
-import org.apache.hawq.pxf.plugins.hive.utilities.EnumHiveToHawqType;
 import org.apache.hawq.pxf.plugins.hive.utilities.HiveUtilities;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -35,6 +34,7 @@ import org.apache.hadoop.hive.metastore.api.Table;
 
 import java.util.Arrays;
 import java.util.List;
+import java.util.Properties;
 
 /**
  * Specialized Hive fragmenter for RC and Text files tables. Unlike the
@@ -55,10 +55,11 @@ import java.util.List;
  */
 public class HiveInputFormatFragmenter extends HiveDataFragmenter {
     private static final Log LOG = LogFactory.getLog(HiveInputFormatFragmenter.class);
-    private static final int EXPECTED_NUM_OF_TOKS = 3;
+    private static final int EXPECTED_NUM_OF_TOKS = 4;
     public static final int TOK_SERDE = 0;
     public static final int TOK_KEYS = 1;
     public static final int TOK_FILTER_DONE = 2;
+    public static final int TOK_COL_TYPES = 3;
 
     /** Defines the Hive input formats currently supported in pxf */
     public enum PXF_HIVE_INPUT_FORMATS {

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/3b15739a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveORCSerdeResolver.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveORCSerdeResolver.java b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveORCSerdeResolver.java
index 7673713..93aa474 100644
--- a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveORCSerdeResolver.java
+++ b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveORCSerdeResolver.java
@@ -45,6 +45,7 @@ public class HiveORCSerdeResolver extends HiveResolver {
     private static final Log LOG = LogFactory.getLog(HiveORCSerdeResolver.class);
     private OrcSerde deserializer;
     private HiveInputFormatFragmenter.PXF_HIVE_SERDES serdeType;
+    private String typesString;
 
     public HiveORCSerdeResolver(InputData input) throws Exception {
         super(input);
@@ -61,6 +62,7 @@ public class HiveORCSerdeResolver extends HiveResolver {
             throw new UnsupportedTypeException("Unsupported Hive Serde: " + serdeEnumStr);
         }
         partitionKeys = toks[HiveInputFormatFragmenter.TOK_KEYS];
+        typesString = toks[HiveInputFormatFragmenter.TOK_COL_TYPES];
         collectionDelim = input.getUserProperty("COLLECTION_DELIM") == null ? COLLECTION_DELIM
                 : input.getUserProperty("COLLECTION_DELIM");
         mapkeyDelim = input.getUserProperty("MAPKEY_DELIM") == null ? MAPKEY_DELIM
@@ -102,11 +104,19 @@ public class HiveORCSerdeResolver extends HiveResolver {
 
         StringBuilder columnNames = new StringBuilder(numberOfDataColumns * 2); // column + delimiter
         StringBuilder columnTypes = new StringBuilder(numberOfDataColumns * 2); // column + delimiter
+        String[] cols = typesString.split(":");
+        String[] hiveColTypes = new String[numberOfDataColumns];
+        parseColTypes(cols, hiveColTypes);
+
         String delim = ",";
         for (int i = 0; i < numberOfDataColumns; i++) {
             ColumnDescriptor column = input.getColumn(i);
             String columnName = column.columnName();
             String columnType = HiveUtilities.toCompatibleHiveType(DataType.get(column.columnTypeCode()), column.columnTypeModifiers());
+            //Complex Types will have a mismatch between Hive and Hawq type
+            if (!columnType.equals(hiveColTypes[i])) {
+                columnType = hiveColTypes[i];
+            }
             if(i > 0) {
                 columnNames.append(delim);
                 columnTypes.append(delim);
@@ -125,4 +135,26 @@ public class HiveORCSerdeResolver extends HiveResolver {
 
         deserializer.initialize(new JobConf(new Configuration(), HiveORCSerdeResolver.class), serdeProperties);
     }
+
+    private void parseColTypes(String[] cols, String[] output) {
+        int i = 0;
+        StringBuilder structTypeBuilder = new StringBuilder();
+        boolean inStruct = false;
+        for (String str : cols) {
+            if (str.contains("struct")) {
+                structTypeBuilder = new StringBuilder();
+                inStruct = true;
+                structTypeBuilder.append(str);
+            } else if (inStruct) {
+                structTypeBuilder.append(':');
+                structTypeBuilder.append(str);
+                if (str.contains(">")) {
+                    inStruct = false;
+                    output[i++] = structTypeBuilder.toString();
+                }
+            } else {
+                output[i++] = str;
+            }
+        }
+    }
 }

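Hive's columns.types table property separates column types with ':' while also using ':' between field name and type inside a struct<...> definition, which is why parseColTypes above has to regroup the split tokens. The following self-contained illustration runs the same regrouping logic on a made-up value; the class and variable names are illustrative only.

// Illustration only: regroups tokens of a hypothetical "columns.types" value
// the same way parseColTypes does.
import java.util.Arrays;

public class ColTypesDemo {
    public static void main(String[] args) {
        String typesString = "int:struct<a:int,b:string>:string"; // sample value
        String[] cols = typesString.split(":");
        String[] output = new String[3];                          // one slot per table column
        int i = 0;
        StringBuilder structType = new StringBuilder();
        boolean inStruct = false;
        for (String str : cols) {
            if (str.contains("struct")) {            // start of a struct type
                structType = new StringBuilder(str);
                inStruct = true;
            } else if (inStruct) {
                structType.append(':').append(str);  // re-join the struct's fields
                if (str.contains(">")) {             // struct definition closed
                    inStruct = false;
                    output[i++] = structType.toString();
                }
            } else {
                output[i++] = str;                   // plain scalar type
            }
        }
        System.out.println(Arrays.toString(output));
        // prints: [int, struct<a:int,b:string>, string]
    }
}
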
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/3b15739a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/utilities/HiveUtilities.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/utilities/HiveUtilities.java b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/utilities/HiveUtilities.java
index ffd66b8..f7ebf4d 100644
--- a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/utilities/HiveUtilities.java
+++ b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/utilities/HiveUtilities.java
@@ -444,9 +444,10 @@ public class HiveUtilities {
             String inputFormatName = partData.storageDesc.getInputFormat();
             String serdeName = partData.storageDesc.getSerdeInfo().getSerializationLib();
             String partitionKeys = serializePartitionKeys(partData);
+            String colTypes = partData.properties.getProperty("columns.types");
             assertFileType(inputFormatName, partData);
             userData = assertSerde(serdeName, partData) + HiveDataFragmenter.HIVE_UD_DELIM
-                    + partitionKeys + HiveDataFragmenter.HIVE_UD_DELIM + filterInFragmenter;
+                    + partitionKeys + HiveDataFragmenter.HIVE_UD_DELIM + filterInFragmenter + HiveDataFragmenter.HIVE_UD_DELIM + colTypes;
         } else if (HiveDataFragmenter.class.isAssignableFrom(fragmenterClass)){
             String inputFormatName = partData.storageDesc.getInputFormat();
             String serdeName = partData.storageDesc.getSerdeInfo().getSerializationLib();


[28/50] [abbrv] incubator-hawq git commit: HAWQ-1276. The error message is not friendly when ranger plugin service is unavailable.

Posted by es...@apache.org.
HAWQ-1276. The error message is not friendly when ranger plugin service is unavailable.


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/e46f06cc
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/e46f06cc
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/e46f06cc

Branch: refs/heads/2.1.0.0-incubating
Commit: e46f06cc95d5bd8212cb1edf8331856461891dc6
Parents: afac2df
Author: stanlyxiang <st...@gmail.com>
Authored: Fri Jan 13 11:33:40 2017 +0800
Committer: Wen Lin <wl...@pivotal.io>
Committed: Tue Jan 17 16:34:04 2017 +0800

----------------------------------------------------------------------
 src/backend/catalog/aclchk.c   | 13 ++++++-------
 src/backend/libpq/rangerrest.c | 28 +++++++++++++---------------
 src/backend/tcop/postgres.c    |  5 +++--
 3 files changed, 22 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/e46f06cc/src/backend/catalog/aclchk.c
----------------------------------------------------------------------
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index 200d9cb..ed36330 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -2739,7 +2739,7 @@ List *pg_rangercheck_batch(List *arg_list)
   List *aclresults = NIL;
   List *requestargs = NIL;
   ListCell *arg;
-  elog(LOG, "rangeracl batch check, acl list length:%d\n", arg_list->length);
+  elog(DEBUG3, "ranger acl batch check, acl list length: %d\n", arg_list->length);
   foreach(arg, arg_list) {
     RangerPrivilegeArgs *arg_ptr = (RangerPrivilegeArgs *) lfirst(arg);
 
@@ -2753,7 +2753,7 @@ List *pg_rangercheck_batch(List *arg_list)
     RangerPrivilegeResults *aclresult = (RangerPrivilegeResults *) palloc(sizeof(RangerPrivilegeResults));
     aclresult->result = RANGERCHECK_NO_PRIV;
     aclresult->relOid = object_oid;
-    // this two sign fields will be set in create_ranger_request_json()
+    /* these two sign fields will be set in create_ranger_request_json() */
     aclresult->resource_sign = 0;
     aclresult->privilege_sign = 0;
     aclresults = lappend(aclresults, aclresult);
@@ -2771,7 +2771,6 @@ List *pg_rangercheck_batch(List *arg_list)
   int ret = check_privilege_from_ranger(requestargs, aclresults);
   if (ret < 0)
   {
-	  elog(WARNING, "ranger service unavailable or unexpected error\n");
 	  ListCell *result;
 	  foreach(result, aclresults) {
 		  RangerPrivilegeResults *result_ptr = (RangerPrivilegeResults *) lfirst(result);
@@ -2808,13 +2807,13 @@ pg_rangercheck(AclObjectKind objkind, Oid object_oid, Oid roleid,
 	List* actions = getActionName(mask);
 	bool isAll = (how == ACLMASK_ALL) ? true: false;
 
-	elog(LOG, "rangeraclcheck kind:%d,objectname:%s,role:%s,mask:%u\n",objkind,objectname,rolename,mask);
+	elog(DEBUG3, "ranger acl check kind: %d, object name: %s, role: %s, mask: %u\n", objkind, objectname, rolename, mask);
 
 	List *resultargs = NIL;
     RangerPrivilegeResults *aclresult = (RangerPrivilegeResults *) palloc(sizeof(RangerPrivilegeResults));
     aclresult->result = RANGERCHECK_NO_PRIV;
     aclresult->relOid = object_oid;
-	// this two sign fields will be set in create_ranger_request_json()
+	/* these two sign fields will be set in create_ranger_request_json() */
 	aclresult->resource_sign = 0;
 	aclresult->privilege_sign = 0;
     resultargs = lappend(resultargs, aclresult);
@@ -2834,7 +2833,7 @@ pg_rangercheck(AclObjectKind objkind, Oid object_oid, Oid roleid,
 	{
 		ListCell *arg;
 		foreach(arg, resultargs) {
-			// only one element
+			/* only one element */
 			RangerPrivilegeResults *arg_ptr = (RangerPrivilegeResults *) lfirst(arg);
 			if (arg_ptr->result == RANGERCHECK_OK)
 				result = ACLCHECK_OK;
@@ -2893,7 +2892,7 @@ pg_aclmask(AclObjectKind objkind, Oid table_oid, Oid roleid,
 		case ACL_KIND_EXTPROTOCOL:
 			return pg_extprotocol_aclmask(table_oid, roleid, mask, how);
 		default:
-			elog(ERROR, "unrecognized objkind: %d",
+			elog(ERROR, "unrecognized object kind : %d",
 				 (int) objkind);
 			/* not reached, but keep compiler quiet */
 			return ACL_NO_RIGHTS;

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/e46f06cc/src/backend/libpq/rangerrest.c
----------------------------------------------------------------------
diff --git a/src/backend/libpq/rangerrest.c b/src/backend/libpq/rangerrest.c
index 74777dc..dc5d193 100644
--- a/src/backend/libpq/rangerrest.c
+++ b/src/backend/libpq/rangerrest.c
@@ -85,14 +85,14 @@ static int parse_ranger_response(char* buffer, List *result_list)
 	struct json_object *response = json_tokener_parse(buffer);
 	if (response == NULL) 
 	{
-		elog(WARNING, "json_tokener_parse failed");
+		elog(WARNING, "failed to parse json tokener.");
 		return -1;
 	}
 
 	struct json_object *accessObj = NULL;
 	if (!json_object_object_get_ex(response, "access", &accessObj))
 	{
-		elog(WARNING, "get json access field failed");
+		elog(WARNING, "failed to get json \"access\" field.");
 		return -1;
 	}
 
@@ -120,7 +120,7 @@ static int parse_ranger_response(char* buffer, List *result_list)
 		const char *privilege_str = json_object_get_string(jprivilege);
 		uint32 resource_sign = string_hash(resource_str, strlen(resource_str));
 		uint32 privilege_sign = string_hash(privilege_str, strlen(privilege_str));
-		elog(DEBUG3, "ranger response access sign, resource_str:%s, privilege_str:%s", 
+		elog(DEBUG3, "ranger response access sign, resource_str: %s, privilege_str: %s",
 			resource_str, privilege_str);
 
 		ListCell *result;
@@ -294,7 +294,7 @@ static json_object *create_ranger_request_json(List *request_list, List *result_
 				break;
 			}
 			default:
-				elog(ERROR, "unrecognized objkind: %d", (int) kind);
+				elog(ERROR, "unsupported object kind : %s", AclObjectKindStr[kind]);
 		} // switch
 		json_object_object_add(jelement, "resource", jresource);
 
@@ -320,7 +320,6 @@ static json_object *create_ranger_request_json(List *request_list, List *result_
 		result_ptr->privilege_sign = string_hash(privilege_str, strlen(privilege_str));
 		elog(DEBUG3, "request access sign, resource_str:%s, privilege_str:%s", 
 			resource_str, privilege_str);
-		
 		j++;
 	} // foreach
 	char str[32];
@@ -354,19 +353,19 @@ static size_t write_callback(char *contents, size_t size, size_t nitems,
 	int original_size = curl->response.buffer_size;
 	while(curl->response.response_size + realsize >= curl->response.buffer_size)
 	{
-		/*double the buffer size if the buffer is not enough.*/
+		/* double the buffer size if the buffer is not enough.*/
 		curl->response.buffer_size = curl->response.buffer_size * 2;
 	}
 	if(original_size < curl->response.buffer_size)
 	{
-		/* our repalloc is not same as realloc, repalloc's first param(buffer) can not be NULL */
+		/* repalloc is not same as realloc, repalloc's first parameter cannot be NULL */
 		curl->response.buffer = repalloc(curl->response.buffer, curl->response.buffer_size);
 	}
 	elog(DEBUG3, "ranger restful response size is %d. response buffer size is %d.", curl->response.response_size, curl->response.buffer_size);
 	if (curl->response.buffer == NULL)
 	{
-		/* out of memory! */
-		elog(WARNING, "not enough memory for Ranger response");
+		/* allocate memory failed. probably out of memory */
+		elog(WARNING, "cannot allocate memory for ranger response");
 		return 0;
 	}
 	memcpy(curl->response.buffer + curl->response.response_size, contents, realsize);
@@ -413,7 +412,6 @@ static int call_ranger_rest(CURL_HANDLE curl_handle, const char* request)
 	curl_easy_setopt(curl_handle->curl_handle, CURLOPT_HTTPHEADER, headers);
 
 	curl_easy_setopt(curl_handle->curl_handle, CURLOPT_POSTFIELDS,request);
-	//"{\"requestId\": 1,\"user\": \"hubert\",\"clientIp\":\"123.0.0.21\",\"context\": \"SELECT * FROM sales\",\"access\":[{\"resource\":{\"database\":\"a-database\",\"schema\":\"a-schema\",\"table\":\"sales\"},\"privileges\": [\"select\"]}]}");
 	/* send all data to this function  */
 	curl_easy_setopt(curl_handle->curl_handle, CURLOPT_WRITEFUNCTION, write_callback);
 	curl_easy_setopt(curl_handle->curl_handle, CURLOPT_WRITEDATA, (void *)curl_handle);
@@ -427,13 +425,13 @@ static int call_ranger_rest(CURL_HANDLE curl_handle, const char* request)
 	/* check for errors */
 	if(res != CURLE_OK)
 	{
-		elog(WARNING, "curl_easy_perform() failed: %s\n",
-			curl_easy_strerror(res));
+		elog(WARNING, "ranger plugin service from http://%s:%d/%s is unavailable : %s.\n",
+				rps_addr_host, rps_addr_port, rps_addr_suffix, curl_easy_strerror(res));
 	}
 	else
 	{
 		ret = 0;
-		elog(DEBUG3, "retrieved %d bytes from ranger restful response.",
+		elog(DEBUG3, "retrieved %d bytes data from ranger restful response.",
 			curl_handle->response.response_size);
 	}
 
@@ -469,8 +467,8 @@ int check_privilege_from_ranger(List *request_list, List *result_list)
 	int ret = parse_ranger_response(curl_context_ranger.response.buffer, result_list);
 	if (ret < 0)
 	{
-		elog(WARNING, "parse ranger response failed, response[%s]", 
-			curl_context_ranger.response.buffer == NULL? "":curl_context_ranger.response.buffer);
+		elog(WARNING, "parse ranger response failed, ranger response content is %s",
+			curl_context_ranger.response.buffer == NULL? "empty.":curl_context_ranger.response.buffer);
 	}
 	if (curl_context_ranger.response.buffer != NULL)
 	{

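The commented-out payload deleted above documents the request shape the backend posts to the Ranger Plugin Service. For illustration only, here is an equivalent standalone request in Java: the endpoint is a placeholder for whatever rps_addr_host, rps_addr_port and rps_addr_suffix are configured to, and the payload is copied verbatim from the removed comment.

// Illustration only: posts the same access-check JSON the backend sends via
// libcurl. The host, port and "rps" suffix below are placeholders.
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.Scanner;

public class RpsCheckSketch {
    public static void main(String[] args) throws Exception {
        String body = "{\"requestId\": 1,\"user\": \"hubert\",\"clientIp\":\"123.0.0.21\","
                + "\"context\": \"SELECT * FROM sales\",\"access\":[{\"resource\":"
                + "{\"database\":\"a-database\",\"schema\":\"a-schema\",\"table\":\"sales\"},"
                + "\"privileges\": [\"select\"]}]}";
        URL url = new URL("http://localhost:8432/rps");          // placeholder endpoint
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        conn.setRequestProperty("Content-Type", "application/json");
        conn.setDoOutput(true);
        try (OutputStream out = conn.getOutputStream()) {
            out.write(body.getBytes(StandardCharsets.UTF_8));    // send the request JSON
        }
        try (Scanner sc = new Scanner(conn.getInputStream(), "UTF-8")) {
            // the response carries an "access" array that parse_ranger_response walks
            System.out.println(sc.useDelimiter("\\A").next());
        }
    }
}
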
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/e46f06cc/src/backend/tcop/postgres.c
----------------------------------------------------------------------
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index e1bfb1d..fc71eda 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -4392,7 +4392,7 @@ PostgresMain(int argc, char *argv[], const char *username)
 	}
 
 	/* for enable ranger*/
-	if (enable_ranger && !curl_context_ranger.hasInited)
+	if (AmIMaster() && enable_ranger && !curl_context_ranger.hasInited)
 	{
 		memset(&curl_context_ranger, 0, sizeof(curl_context_t));
 		curl_global_init(CURL_GLOBAL_ALL);
@@ -4402,11 +4402,12 @@ PostgresMain(int argc, char *argv[], const char *username)
 			/* cleanup curl stuff */
 			/* no need to cleanup curl_handle since it's null. just cleanup curl global.*/
 			curl_global_cleanup();
+			elog(ERROR, "failed to initialize global curl context.");
 		}
 		curl_context_ranger.hasInited = true;
 		curl_context_ranger.response.buffer = palloc0(CURL_RES_BUFFER_SIZE);
 		curl_context_ranger.response.buffer_size = CURL_RES_BUFFER_SIZE;
-		elog(DEBUG3, "when enable ranger, init global struct for privileges check.");
+		elog(DEBUG3, "initialize global curl context for privileges check.");
 		on_proc_exit(curl_finalize, 0);
 	}
 	/*


[33/50] [abbrv] incubator-hawq git commit: HAWQ-762. Login to kerberos if credentials are no longer valid

Posted by es...@apache.org.
HAWQ-762. Login to kerberos if credentials are no longer valid


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/8261c13e
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/8261c13e
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/8261c13e

Branch: refs/heads/2.1.0.0-incubating
Commit: 8261c13ef73de9109ec5340304471871f544fa17
Parents: 7f36b35
Author: Kavinder Dhaliwal <ka...@gmail.com>
Authored: Fri Jan 6 11:56:29 2017 -0800
Committer: Kavinder Dhaliwal <ka...@gmail.com>
Committed: Wed Jan 18 14:06:32 2017 -0800

----------------------------------------------------------------------
 .../hawq/pxf/service/utilities/SecuredHDFS.java | 11 +++--
 .../pxf/service/utilities/SecuredHDFSTest.java  | 45 ++++++++++----------
 2 files changed, 31 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8261c13e/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/utilities/SecuredHDFS.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/utilities/SecuredHDFS.java b/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/utilities/SecuredHDFS.java
index f442a6d..1e1bcd3 100644
--- a/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/utilities/SecuredHDFS.java
+++ b/pxf/pxf-service/src/main/java/org/apache/hawq/pxf/service/utilities/SecuredHDFS.java
@@ -53,6 +53,14 @@ public class SecuredHDFS {
     public static void verifyToken(ProtocolData protData, ServletContext context) {
         try {
             if (UserGroupInformation.isSecurityEnabled()) {
+                /*
+                 * HAWQ-1215: The verify token method validates that the token sent from
+                 * Hawq to PXF is valid. However, this token is for a user other than
+                 * 'pxf'. The following line ensures that before attempting any secure communication
+                 * PXF tries to relogin in the case that its own ticket is about to expire
+                 * #reloginFromKeytab is a no-op if the ticket is not near expiring
+                 */
+                UserGroupInformation.getLoginUser().reloginFromKeytab();
                 Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
                 String tokenString = protData.getToken();
                 token.decodeFromUrlString(tokenString);
@@ -103,9 +111,6 @@ public class SecuredHDFS {
             LOG.debug("user " + userGroupInformation.getUserName() + " ("
                     + userGroupInformation.getShortUserName()
                     + ") authenticated");
-
-            // re-login if necessary
-            userGroupInformation.checkTGTAndReloginFromKeytab();
         } catch (IOException e) {
             throw new SecurityException("Failed to verify delegation token "
                     + e, e);

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8261c13e/pxf/pxf-service/src/test/java/org/apache/hawq/pxf/service/utilities/SecuredHDFSTest.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-service/src/test/java/org/apache/hawq/pxf/service/utilities/SecuredHDFSTest.java b/pxf/pxf-service/src/test/java/org/apache/hawq/pxf/service/utilities/SecuredHDFSTest.java
index 4944a35..9aecce0 100644
--- a/pxf/pxf-service/src/test/java/org/apache/hawq/pxf/service/utilities/SecuredHDFSTest.java
+++ b/pxf/pxf-service/src/test/java/org/apache/hawq/pxf/service/utilities/SecuredHDFSTest.java
@@ -29,24 +29,25 @@ import org.powermock.core.classloader.annotations.PrepareForTest;
 import org.powermock.modules.junit4.PowerMockRunner;
 
 import javax.servlet.ServletContext;
-import java.util.HashMap;
-import java.util.Map;
+import java.io.IOException;
 
 import static org.junit.Assert.*;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
 
 @RunWith(PowerMockRunner.class)
 @PrepareForTest({UserGroupInformation.class})
 public class SecuredHDFSTest {
-    Map<String, String> parameters;
     ProtocolData mockProtocolData;
     ServletContext mockContext;
 
     @Test
-    public void invalidTokenThrows() {
+    public void invalidTokenThrows() throws IOException {
         when(UserGroupInformation.isSecurityEnabled()).thenReturn(true);
+        UserGroupInformation ugi = mock(UserGroupInformation.class);
+        when(UserGroupInformation.getLoginUser()).thenReturn(ugi);
         when(mockProtocolData.getToken()).thenReturn("This is odd");
 
         try {
@@ -57,30 +58,30 @@ public class SecuredHDFSTest {
         }
     }
 
+    @Test
+    public void loggedOutUser() throws IOException {
+        when(UserGroupInformation.isSecurityEnabled()).thenReturn(true);
+        UserGroupInformation ugi = mock(UserGroupInformation.class);
+        when(UserGroupInformation.getLoginUser()).thenReturn(ugi);
+        when(mockProtocolData.getToken()).thenReturn("This is odd");
+
+        try {
+            SecuredHDFS.verifyToken(mockProtocolData, mockContext);
+            fail("invalid X-GP-TOKEN should throw");
+        } catch (SecurityException e) {
+            verify(ugi).reloginFromKeytab();
+            assertEquals("Failed to verify delegation token java.io.EOFException", e.getMessage());
+        }
+    }
+
     /*
      * setUp function called before each test
 	 */
     @Before
     public void setUp() {
-        parameters = new HashMap<>();
-
-        parameters.put("X-GP-ALIGNMENT", "all");
-        parameters.put("X-GP-SEGMENT-ID", "-44");
-        parameters.put("X-GP-SEGMENT-COUNT", "2");
-        parameters.put("X-GP-HAS-FILTER", "0");
-        parameters.put("X-GP-FORMAT", "TEXT");
-        parameters.put("X-GP-URL-HOST", "my://bags");
-        parameters.put("X-GP-URL-PORT", "-8020");
-        parameters.put("X-GP-ATTRS", "-1");
-        parameters.put("X-GP-ACCESSOR", "are");
-        parameters.put("X-GP-RESOLVER", "packed");
-        parameters.put("X-GP-DATA-DIR", "i'm/ready/to/go");
-        parameters.put("X-GP-FRAGMENT-METADATA", "U29tZXRoaW5nIGluIHRoZSB3YXk=");
-        parameters.put("X-GP-I'M-STANDING-HERE", "outside-your-door");
-
-        mockProtocolData = mock(ProtocolData.class);        
+        mockProtocolData = mock(ProtocolData.class);
         mockContext = mock(ServletContext.class);
 
         PowerMockito.mockStatic(UserGroupInformation.class);
     }
-}
+}
\ No newline at end of file

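Condensed into a standalone sketch, the ordering this commit enforces is: refresh PXF's own Kerberos ticket before doing any secure work with the delegation token. The keytab login itself is assumed to have happened at service startup, and the token verification is elided.

// Sketch only: the relogin-then-verify ordering shown in the diff above.
import org.apache.hadoop.security.UserGroupInformation;

public class RefreshBeforeVerifySketch {
    static void verifyToken(String tokenString) throws Exception {
        if (UserGroupInformation.isSecurityEnabled()) {
            // no-op unless PXF's own ticket is close to expiring
            UserGroupInformation.getLoginUser().reloginFromKeytab();
            // ... decode tokenString and verify the delegation token here ...
        }
    }
}
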

[40/50] [abbrv] incubator-hawq git commit: HAWQ-1291. Fix the name of privilege when create temp table.

Posted by es...@apache.org.
HAWQ-1291. Fix the name of privilege when create temp table.


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/eb2ea907
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/eb2ea907
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/eb2ea907

Branch: refs/heads/2.1.0.0-incubating
Commit: eb2ea9074460d911eeed5205970bd4076188c3fc
Parents: d2608de
Author: hzhang2 <zh...@163.com>
Authored: Tue Jan 24 14:57:13 2017 +0800
Committer: hzhang2 <zh...@163.com>
Committed: Tue Jan 24 14:57:13 2017 +0800

----------------------------------------------------------------------
 src/backend/catalog/aclchk.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/eb2ea907/src/backend/catalog/aclchk.c
----------------------------------------------------------------------
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index ed36330..33fa9ab 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -2646,7 +2646,7 @@ char *getNameFromOid(AclObjectKind objkind, Oid object_oid)
 
 char actionName[12][12] = {"INSERT","SELECT","UPDATE", "DELETE",
     "TRUNCATE", "REFERENCES", "TRIGGER", "EXECUTE", "USAGE",
-    "CREATE", "CREATE_TEMP", "CONNECT"};
+    "CREATE", "TEMP", "CONNECT"};
 
 List *getActionName(AclMode mask)
 {


[11/50] [abbrv] incubator-hawq git commit: HAWQ-1258. Segment resource manager does not switch back when it cannot resolve standby host name

Posted by es...@apache.org.
HAWQ-1258. Segment resource manager does not switch back when it cannot resolve standby host name


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/2a7c20f2
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/2a7c20f2
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/2a7c20f2

Branch: refs/heads/2.1.0.0-incubating
Commit: 2a7c20f2b5eff1563d3f2a7c8f7504bec2099bd3
Parents: ad71873
Author: Yi <yj...@pivotal.io>
Authored: Wed Jan 11 19:40:08 2017 +1100
Committer: Yi <yj...@pivotal.io>
Committed: Wed Jan 11 19:40:08 2017 +1100

----------------------------------------------------------------------
 .../resourcemanager/communication/rmcomm_RMSEG2RM.c      | 10 ++++++----
 src/backend/resourcemanager/include/dynrm.h              |  2 ++
 src/backend/resourcemanager/resourcemanager_RMSEG.c      | 11 +++++++++++
 src/backend/utils/misc/guc.c                             |  2 +-
 4 files changed, 20 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/2a7c20f2/src/backend/resourcemanager/communication/rmcomm_RMSEG2RM.c
----------------------------------------------------------------------
diff --git a/src/backend/resourcemanager/communication/rmcomm_RMSEG2RM.c b/src/backend/resourcemanager/communication/rmcomm_RMSEG2RM.c
index 92946c8..4c93f78 100644
--- a/src/backend/resourcemanager/communication/rmcomm_RMSEG2RM.c
+++ b/src/backend/resourcemanager/communication/rmcomm_RMSEG2RM.c
@@ -107,8 +107,10 @@ int sendIMAlive(int  *errorcode,
 	if ( res != FUNC_RETURN_OK )
 	{
 		rm_pfree(AsyncCommContext, context);
-		elog(WARNING, "Fail to register asynchronous connection for sending "
-					  "IMAlive message. %d", res);
+		elog(LOG, "failed to register asynchronous connection for sending "
+			      "IMAlive message. %d", res);
+		/* Always switch if fail to register connection here. */
+		switchIMAliveSendingTarget();
 		return res;
 	}
 
@@ -140,7 +142,7 @@ void receivedIMAliveResponse(AsyncCommMessageHandlerContext  context,
 		 buffersize != sizeof(RPCResponseIMAliveData) ) {
 		elog(WARNING, "Segment's resource manager received wrong response for "
 					  "heart-beat request.");
-		DRMGlobalInstance->SendToStandby = !DRMGlobalInstance->SendToStandby;
+		switchIMAliveSendingTarget();
 	}
 	else
 	{
@@ -165,7 +167,7 @@ void sentIMAliveError(AsyncCommMessageHandlerContext context)
 	else
 		elog(WARNING, "Segment's resource manager sending IMAlive message "
 					  "switches from master to standby");
-	DRMGlobalInstance->SendToStandby = !DRMGlobalInstance->SendToStandby;
+	switchIMAliveSendingTarget();
 }
 
 void sentIMAliveCleanUp(AsyncCommMessageHandlerContext context)

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/2a7c20f2/src/backend/resourcemanager/include/dynrm.h
----------------------------------------------------------------------
diff --git a/src/backend/resourcemanager/include/dynrm.h b/src/backend/resourcemanager/include/dynrm.h
index bd4a303..a6309c8 100644
--- a/src/backend/resourcemanager/include/dynrm.h
+++ b/src/backend/resourcemanager/include/dynrm.h
@@ -322,4 +322,6 @@ int  initializeSocketServer_RMSEG(void);
 int  MainHandlerLoop_RMSEG(void);
 
 void checkAndBuildFailedTmpDirList(void);
+
+void switchIMAliveSendingTarget(void);
 #endif //DYNAMIC_RESOURCE_MANAGEMENT_H

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/2a7c20f2/src/backend/resourcemanager/resourcemanager_RMSEG.c
----------------------------------------------------------------------
diff --git a/src/backend/resourcemanager/resourcemanager_RMSEG.c b/src/backend/resourcemanager/resourcemanager_RMSEG.c
index f3042eb..f8afe5a 100644
--- a/src/backend/resourcemanager/resourcemanager_RMSEG.c
+++ b/src/backend/resourcemanager/resourcemanager_RMSEG.c
@@ -275,3 +275,14 @@ void checkAndBuildFailedTmpDirList(void)
 			  "directory, which costs " UINT64_FORMAT " us",
 			  endtime - starttime);
 }
+
+void switchIMAliveSendingTarget(void)
+{
+	/* We switch to standby server only when it is correctly set. */
+	if (pg_strcasecmp(standby_addr_host, "none") != 0)
+	{
+		DRMGlobalInstance->SendToStandby = !DRMGlobalInstance->SendToStandby;
+		elog(LOG, "segment will send heart-beat to %s from now on",
+				  DRMGlobalInstance->SendToStandby ? "standby" : "master");
+	}
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/2a7c20f2/src/backend/utils/misc/guc.c
----------------------------------------------------------------------
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index dccd599..fbf19cf 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -8200,7 +8200,7 @@ static struct config_string ConfigureNamesString[] =
 			NULL
 		},
 		&standby_addr_host,
-		"localhost", NULL, NULL
+		"none", NULL, NULL
 	},
 
 	{


[37/50] [abbrv] incubator-hawq git commit: HAWQ-1193. Add createEncryption, getEZForPath, listEncryptionZones RPC for libhdfs3.

Posted by es...@apache.org.
HAWQ-1193. Add createEncryption, getEZForPath, listEncryptionZones RPC for libhdfs3.


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/517e6d26
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/517e6d26
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/517e6d26

Branch: refs/heads/2.1.0.0-incubating
Commit: 517e6d26cadff3a0fc03af1f36e8302bf454e573
Parents: efa1230
Author: ivan <iw...@pivotal.io>
Authored: Tue Dec 20 16:51:18 2016 +0800
Committer: ivan <iw...@pivotal.io>
Committed: Thu Jan 19 16:10:32 2017 +0800

----------------------------------------------------------------------
 depends/libhdfs3/mock/MockFileSystemInter.h     |   5 +
 .../libhdfs3/src/client/EncryptionZoneInfo.h    |  80 +++++++++++++
 .../src/client/EncryptionZoneIterator.cpp       |  86 ++++++++++++++
 .../src/client/EncryptionZoneIterator.h         |  56 +++++++++
 .../libhdfs3/src/client/FileEncryptionInfo.h    |  93 +++++++++++++++
 depends/libhdfs3/src/client/FileStatus.h        |  19 +++-
 depends/libhdfs3/src/client/FileSystem.cpp      |  54 +++++++++
 depends/libhdfs3/src/client/FileSystem.h        |  32 ++++++
 depends/libhdfs3/src/client/FileSystemImpl.cpp  |  88 ++++++++++++++
 depends/libhdfs3/src/client/FileSystemImpl.h    |  40 +++++++
 depends/libhdfs3/src/client/FileSystemInter.h   |  40 +++++++
 depends/libhdfs3/src/client/Hdfs.cpp            | 114 ++++++++++++++++++-
 depends/libhdfs3/src/client/hdfs.h              |  63 ++++++++++
 .../src/proto/ClientNamenodeProtocol.proto      |   8 ++
 depends/libhdfs3/src/proto/datatransfer.proto   |   1 +
 depends/libhdfs3/src/proto/encryption.proto     |  67 +++++++++++
 depends/libhdfs3/src/proto/hdfs.proto           |  60 ++++++++++
 depends/libhdfs3/src/rpc/RpcAuth.h              |   2 +-
 depends/libhdfs3/src/rpc/RpcChannel.cpp         |   4 +-
 depends/libhdfs3/src/server/Namenode.h          |  35 +++++-
 depends/libhdfs3/src/server/NamenodeImpl.cpp    |  73 ++++++++++++
 depends/libhdfs3/src/server/NamenodeImpl.h      |   9 ++
 depends/libhdfs3/src/server/NamenodeProxy.cpp   |  24 ++++
 depends/libhdfs3/src/server/NamenodeProxy.h     |   7 ++
 depends/libhdfs3/src/server/RpcHelper.h         |  31 +++++
 .../libhdfs3/test/function/TestCInterface.cpp   |  42 +++++++
 .../libhdfs3/test/function/TestFileSystem.cpp   |  33 ++++++
 .../libhdfs3/test/function/TestOutputStream.cpp |  12 ++
 28 files changed, 1171 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/mock/MockFileSystemInter.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/mock/MockFileSystemInter.h b/depends/libhdfs3/mock/MockFileSystemInter.h
index 4d91c73..336db7e 100644
--- a/depends/libhdfs3/mock/MockFileSystemInter.h
+++ b/depends/libhdfs3/mock/MockFileSystemInter.h
@@ -101,6 +101,11 @@ public:
   MOCK_METHOD3(getFileBlockLocations, std::vector<Hdfs::BlockLocation> (const char * path, int64_t start, int64_t len));
   MOCK_METHOD2(listAllDirectoryItems, std::vector<Hdfs::FileStatus> (const char * path, bool needLocation));
   MOCK_METHOD0(getPeerCache, Hdfs::Internal::PeerCache &());
+  MOCK_METHOD2(createEncryptionZone, bool(const char * path, const char * keyName));
+  MOCK_METHOD1(getEZForPath, Hdfs::EncryptionZoneInfo(const char * path));
+  MOCK_METHOD2(listEncryptionZones, bool(const int64_t id, std::vector<Hdfs::EncryptionZoneInfo> &));
+  MOCK_METHOD0(listEncryptionZone, Hdfs::EncryptionZoneIterator());
+  MOCK_METHOD0(listAllEncryptionZoneItems, std::vector<Hdfs::EncryptionZoneInfo>());
 };
 
 #endif /* _HDFS_LIBHDFS3_MOCK_MOCKSOCKET_H_ */

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/src/client/EncryptionZoneInfo.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/EncryptionZoneInfo.h b/depends/libhdfs3/src/client/EncryptionZoneInfo.h
new file mode 100644
index 0000000..d436ae7
--- /dev/null
+++ b/depends/libhdfs3/src/client/EncryptionZoneInfo.h
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_CLIENT_ENCRYPTIONZONEINFO_H_
+#define _HDFS_LIBHDFS3_CLIENT_ENCRYPTIONZONEINFO_H_
+
+#include <string>
+
+namespace Hdfs {
+
+class EncryptionZoneInfo {
+public:
+    EncryptionZoneInfo() : 
+		suite(0), cryptoProtocolVersion(0), id(0) {
+    }
+
+    int getSuite() const {
+        return suite;
+    }
+
+    void setSuite(int suite) {
+        this->suite = suite;
+    }
+
+    int getCryptoProtocolVersion() const {
+        return cryptoProtocolVersion;
+    }
+
+    void setCryptoProtocolVersion(int cryptoProtocolVersion) {
+        this->cryptoProtocolVersion = cryptoProtocolVersion;
+    }
+
+    int64_t getId() const {
+        return id;
+    }
+
+    void setId(int64_t id) {
+        this->id = id;
+    }
+
+    const char * getPath() const{
+        return path.c_str();
+    }
+
+    void setPath(const char * path){
+        this->path = path;
+    }
+
+    const char * getKeyName() const{
+        return keyName.c_str();
+    }
+
+    void setKeyName(const char * keyName){
+        this->keyName = keyName;
+    }
+
+private:
+    int suite;
+    int cryptoProtocolVersion;
+    int64_t id;
+    std::string path;
+    std::string keyName;
+};
+
+}
+#endif

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/src/client/EncryptionZoneIterator.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/EncryptionZoneIterator.cpp b/depends/libhdfs3/src/client/EncryptionZoneIterator.cpp
new file mode 100644
index 0000000..085541a
--- /dev/null
+++ b/depends/libhdfs3/src/client/EncryptionZoneIterator.cpp
@@ -0,0 +1,86 @@
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "EncryptionZoneIterator.h"
+#include "Exception.h"
+#include "ExceptionInternal.h"
+#include "EncryptionZoneInfo.h"
+#include "FileSystemImpl.h"
+
+namespace Hdfs {
+EncryptionZoneIterator::EncryptionZoneIterator() :filesystem(NULL), id(0), next(0) {
+}
+
+EncryptionZoneIterator::EncryptionZoneIterator(Hdfs::Internal::FileSystemImpl * const fs, 
+                                               const int64_t id) :filesystem(fs), id(id), next(0) {
+}
+
+EncryptionZoneIterator::EncryptionZoneIterator(const EncryptionZoneIterator & it) :
+    filesystem(it.filesystem), id(it.id), next(it.next), lists(it.lists) {
+}
+
+EncryptionZoneIterator & EncryptionZoneIterator::operator =(const EncryptionZoneIterator & it) {
+    if (this == &it) {
+        return *this;
+    }
+
+    filesystem = it.filesystem;
+    id = it.id;
+    next = it.next;
+    lists = it.lists;
+    return *this;
+}
+
+bool EncryptionZoneIterator::listEncryptionZones() {
+    bool more;
+
+    if (NULL == filesystem) {
+        return false;
+    }
+
+    next = 0;
+    lists.clear();
+    more = filesystem->listEncryptionZones(id, lists);
+    if (!lists.empty()){
+        id = lists.back().getId();
+    }
+
+    return more || !lists.empty();
+}
+
+bool EncryptionZoneIterator::hasNext() {
+    if (next >= lists.size()) {
+        return listEncryptionZones();
+    }
+
+    return true;
+}
+
+Hdfs::EncryptionZoneInfo EncryptionZoneIterator::getNext() {
+    if (next >= lists.size()) {
+        if (!listEncryptionZones()) {
+            THROW(HdfsIOException, "End of the dir flow");
+        }
+    }
+    return lists[next++];
+}
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/src/client/EncryptionZoneIterator.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/EncryptionZoneIterator.h b/depends/libhdfs3/src/client/EncryptionZoneIterator.h
new file mode 100644
index 0000000..9e37559
--- /dev/null
+++ b/depends/libhdfs3/src/client/EncryptionZoneIterator.h
@@ -0,0 +1,56 @@
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHFDS3_CLIENT_ENCRYPTIONZONE_ITERATOR_H_
+#define _HDFS_LIBHFDS3_CLIENT_ENCRYPTIONZONE_ITERATOR_H_
+
+#include "FileStatus.h"
+#include "EncryptionZoneInfo.h"
+#include <vector>
+
+namespace Hdfs {
+namespace Internal {
+class FileSystemImpl;
+}
+
+class EncryptionZoneIterator {
+public:
+    EncryptionZoneIterator();
+    EncryptionZoneIterator(Hdfs::Internal::FileSystemImpl * const fs,
+                          const int64_t id);
+    EncryptionZoneIterator(const EncryptionZoneIterator & it);
+    EncryptionZoneIterator & operator = (const EncryptionZoneIterator & it);
+    bool hasNext();
+    EncryptionZoneInfo getNext();
+
+private:
+    bool listEncryptionZones();
+
+private:
+    Hdfs::Internal::FileSystemImpl * filesystem;
+    int64_t id;
+    size_t next;
+    std::vector<EncryptionZoneInfo> lists;
+};
+
+}
+
+#endif /* _HDFS_LIBHFDS3_CLIENT_ENCRYPTIONZONE_ITERATOR_H_ */

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/src/client/FileEncryptionInfo.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/FileEncryptionInfo.h b/depends/libhdfs3/src/client/FileEncryptionInfo.h
new file mode 100644
index 0000000..32ead6c
--- /dev/null
+++ b/depends/libhdfs3/src/client/FileEncryptionInfo.h
@@ -0,0 +1,93 @@
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_CLIENT_FILEENCRYPTIONINFO_H_
+#define _HDFS_LIBHDFS3_CLIENT_FILEENCRYPTIONINFO_H_
+
+#include <string>
+
+namespace Hdfs {
+
+class FileEncryptionInfo {
+public:
+    FileEncryptionInfo() : 
+		cryptoProtocolVersion(0), suite(0){
+    }
+
+    int getSuite() const {
+        return suite;
+    }
+
+    void setSuite(int suite) {
+        this->suite = suite;
+    }
+
+    int getCryptoProtocolVersion() const {
+        return cryptoProtocolVersion;
+    }
+
+    void setCryptoProtocolVersion(int cryptoProtocolVersion) {
+        this->cryptoProtocolVersion = cryptoProtocolVersion;
+    }
+
+    const std::string & getKey() const{
+        return key;
+    }
+
+    void setKey(const std::string & key){
+        this->key = key;
+    }
+
+    const std::string & getKeyName() const{
+        return keyName;
+    }
+
+    void setKeyName(const std::string & keyName){
+        this->keyName = keyName;
+    }
+
+    const std::string & getIv() const{
+        return iv;
+    } 
+
+    void setIv(const std::string & iv){
+        this->iv = iv;
+    }
+	
+    const std::string & getEzKeyVersionName() const{
+        return ezKeyVersionName;
+    }
+
+    void setEzKeyVersionName(const std::string & ezKeyVersionName){
+        this->ezKeyVersionName = ezKeyVersionName;
+    }
+
+private:
+    int suite;
+    int cryptoProtocolVersion;
+    std::string key;
+    std::string iv;
+    std::string keyName;
+    std::string ezKeyVersionName; 
+};
+
+}
+#endif

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/src/client/FileStatus.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/FileStatus.h b/depends/libhdfs3/src/client/FileStatus.h
index 51b5096..1033b80 100644
--- a/depends/libhdfs3/src/client/FileStatus.h
+++ b/depends/libhdfs3/src/client/FileStatus.h
@@ -23,8 +23,9 @@
 #define _HDFS_LIBHDFS3_CLIENT_FILESTATUS_H_
 
 #include "Permission.h"
+#include "client/FileEncryptionInfo.h"
 
-#include <string>
+#include <string.h>
 
 namespace Hdfs {
 
@@ -143,6 +144,21 @@ public:
         return !symlink.empty();
     }
 
+    /**
+    * Get encryption information for a file.
+    */
+    FileEncryptionInfo* getFileEncryption(){
+        return &fileEncryption;
+    }
+
+    /**
+     * Is this file encrypted?
+     * @return true if this file is encrypted.
+     */
+    bool isFileEncrypted() const {
+        return fileEncryption.getKey().length() > 0 && fileEncryption.getKeyName().length() > 0;
+    }
+
 private:
     bool isdir;
     int64_t atime;
@@ -155,6 +171,7 @@ private:
     std::string owner;
     std::string path;
     std::string symlink;
+    FileEncryptionInfo fileEncryption;
 };
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/src/client/FileSystem.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/FileSystem.cpp b/depends/libhdfs3/src/client/FileSystem.cpp
index 6a660a4..8c18590 100644
--- a/depends/libhdfs3/src/client/FileSystem.cpp
+++ b/depends/libhdfs3/src/client/FileSystem.cpp
@@ -20,6 +20,7 @@
  * limitations under the License.
  */
 #include "DirectoryIterator.h"
+#include "EncryptionZoneIterator.h"
 #include "Exception.h"
 #include "ExceptionInternal.h"
 #include "FileSystem.h"
@@ -582,4 +583,57 @@ void FileSystem::cancelDelegationToken(const std::string & token) {
     impl->filesystem->cancelDelegationToken(token);
 }
 
+
+/**
+ * Create an encryption zone for the directory with the specified key name.
+ * @param path the directory path on which the encryption zone is created.
+ * @param keyName the key name of the encryption zone.
+ * @return true on success.
+ */
+bool FileSystem::createEncryptionZone(const char * path, const char * keyName) {
+    if (!impl) {
+        THROW(HdfsIOException, "FileSystem: not connected.");
+    }
+
+    return impl->filesystem->createEncryptionZone(path, keyName);
+}
+
+/**
+ * Get the encryption zone information for a path.
+ * @param path the path for which encryption zone information is returned.
+ * @return the encryption zone information.
+ */
+EncryptionZoneInfo FileSystem::getEZForPath(const char * path) {
+    if (!impl) {
+        THROW(HdfsIOException, "FileSystem: not connected.");
+    }
+   
+    return impl->filesystem->getEZForPath(path);
+}
+
+/**
+ * List the encryption zones.
+ * @return an iterator over all encryption zones.
+ */
+EncryptionZoneIterator FileSystem::listEncryptionZone()  {
+    if (!impl) {
+        THROW(HdfsIOException, "FileSystem: not connected.");
+    }
+
+    return impl->filesystem->listEncryptionZone();
+}
+
+/**
+ * List all encryption zones.
+ * @return a vector of encryption zone information.
+ */
+std::vector<EncryptionZoneInfo> FileSystem::listAllEncryptionZoneItems() {
+    if (!impl) {
+        THROW(HdfsIOException, "FileSystem: not connected.");
+    }
+
+    return impl->filesystem->listAllEncryptionZoneItems();
+}
+
 }

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/src/client/FileSystem.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/FileSystem.h b/depends/libhdfs3/src/client/FileSystem.h
index 4f99c3c..e1f4dd2 100644
--- a/depends/libhdfs3/src/client/FileSystem.h
+++ b/depends/libhdfs3/src/client/FileSystem.h
@@ -24,8 +24,10 @@
 
 #include "BlockLocation.h"
 #include "DirectoryIterator.h"
+#include "EncryptionZoneIterator.h"
 #include "FileStatus.h"
 #include "FileSystemStats.h"
+#include "EncryptionZoneInfo.h"
 #include "Permission.h"
 #include "XmlConfig.h"
 
@@ -276,6 +278,36 @@ public:
      */
     void cancelDelegationToken(const std::string & token);
 
+    /**
+     * Create an encryption zone for the directory with the specified key name.
+     * @param path the directory path on which the encryption zone is created.
+     * @param keyName the key name of the encryption zone.
+     * @return true on success.
+     */
+    bool createEncryptionZone(const char * path, const char * keyName);
+    
+    /**
+     * Get the encryption zone information for a path.
+     * @param path the path for which encryption zone information is returned.
+     * @return the encryption zone information.
+     */
+    EncryptionZoneInfo getEZForPath(const char * path);
+
+    /**
+     * List the encryption zones.
+     * @return an iterator over all encryption zones.
+     */
+    EncryptionZoneIterator listEncryptionZone();
+
+
+    /**
+     * List all encryption zones.
+     * @return a vector of encryption zone information.
+     */
+    std::vector<EncryptionZoneInfo> listAllEncryptionZoneItems();
+
+
 private:
     Config conf;
     Internal::FileSystemWrapper * impl;

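The FileSystem methods above are thin wrappers over FileSystemImpl and mirror the libhdfs C entry points one-to-one. The short sketch below is not part of the commit; it only illustrates the intended call sequence from C++. The key name "mykey", the configuration file name, and the in-tree include paths are placeholders, so treat the details as illustrative rather than canonical.

    #include "client/FileSystem.h"
    #include "client/EncryptionZoneInfo.h"
    #include "client/EncryptionZoneIterator.h"
    #include "XmlConfig.h"
    #include <iostream>

    int main() {
        Hdfs::Config conf("function-test.xml");   // client configuration (placeholder)
        Hdfs::FileSystem fs(conf);
        fs.connect();                             // connect to the default NameNode

        fs.mkdirs("/tde-demo", 0755);             // the zone root must exist and be empty
        fs.createEncryptionZone("/tde-demo", "mykey");

        Hdfs::EncryptionZoneInfo ez = fs.getEZForPath("/tde-demo");
        std::cout << "zone uses key " << ez.getKeyName() << std::endl;

        // Walk every encryption zone known to the NameNode.
        Hdfs::EncryptionZoneIterator it = fs.listEncryptionZone();
        while (it.hasNext()) {
            std::cout << it.getNext().getPath() << std::endl;
        }

        fs.disconnect();
        return 0;
    }
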
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/src/client/FileSystemImpl.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/FileSystemImpl.cpp b/depends/libhdfs3/src/client/FileSystemImpl.cpp
index 7b0f20a..6ee2b91 100644
--- a/depends/libhdfs3/src/client/FileSystemImpl.cpp
+++ b/depends/libhdfs3/src/client/FileSystemImpl.cpp
@@ -22,11 +22,13 @@
 #include "Atomic.h"
 #include "BlockLocation.h"
 #include "DirectoryIterator.h"
+#include "EncryptionZoneIterator.h"
 #include "Exception.h"
 #include "ExceptionInternal.h"
 #include "FileStatus.h"
 #include "FileSystemImpl.h"
 #include "FileSystemStats.h"
+#include "EncryptionZoneInfo.h"
 #include "InputStream.h"
 #include "LeaseRenewer.h"
 #include "Logger.h"
@@ -775,5 +777,91 @@ bool FileSystemImpl::unregisterOpenedOutputStream() {
     return  openedOutputStream == 0;
 }
 
+/**
+ * Create an encryption zone for the directory with the specified key name.
+ * @param path the directory path on which the encryption zone is created.
+ * @param keyName the key name of the encryption zone.
+ * @return true on success.
+ */
+
+bool FileSystemImpl::createEncryptionZone(const char * path, const char * keyName) {
+    if (!nn) {
+        THROW(HdfsIOException, "FileSystemImpl: not connected.");
+    }
+
+    if (NULL == path || !strlen(path)) {
+        THROW(InvalidParameter, "Invalid input: path should not be empty");
+    }
+
+    if (NULL == keyName || !strlen(keyName)) {
+        THROW(InvalidParameter, "Invalid input: key name should not be empty");
+    }
+
+    return nn->createEncryptionZone(getStandardPath(path), keyName);
+}
+
+
+/**
+ * Get the encryption zone information for a path.
+ * @param path the path for which encryption zone information is returned.
+ * @return the encryption zone information.
+ */
+
+EncryptionZoneInfo FileSystemImpl::getEZForPath(const char * path) {
+    if (!nn) {
+        THROW(HdfsIOException, "FileSystemImpl: not connected.");
+    }
+
+    if (NULL == path || !strlen(path)) {
+        THROW(InvalidParameter, "Invalid input: path should not be empty");
+    }
+
+    return nn->getEncryptionZoneInfo(getStandardPath(path), NULL);
+}
+
+bool FileSystemImpl::listEncryptionZones(const int64_t id,
+                                std::vector<EncryptionZoneInfo> & ezl) {
+    if (!nn) {
+        THROW(HdfsIOException, "FileSystemImpl: not connected.");
+    }
+
+    return nn->listEncryptionZones(id, ezl);
+}
+
+/**
+ * List the encryption zones.
+ * @return an iterator over all encryption zones.
+ */
+EncryptionZoneIterator FileSystemImpl::listEncryptionZone() {
+    if (!nn) {
+        THROW(HdfsIOException, "FileSystemImpl: not connected.");
+    }
+
+    return EncryptionZoneIterator(this, 0);
+}
+/**
+ * List all encryption zones.
+ * @return a vector of encryption zone information.
+ */
+
+std::vector<EncryptionZoneInfo> FileSystemImpl::listAllEncryptionZoneItems() {
+    if (!nn) {
+        THROW(HdfsIOException, "FileSystemImpl: not connected.");
+    }
+
+    std::vector<EncryptionZoneInfo> retval;
+    EncryptionZoneIterator it = listEncryptionZone();
+
+    while (it.hasNext()) {
+        retval.push_back(it.getNext());
+    }
+    return retval;
+}
+
 }
 }

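FileSystemImpl::listEncryptionZones(id, ezl) is the paging primitive behind the iterator: the id argument carries the id of the last zone already received (0 for the first batch) and the return value indicates whether the NameNode has more to give, which is presumably how EncryptionZoneIterator refills itself. A hedged sketch of driving that contract by hand, assuming an already-connected FileSystemImpl and the getId() accessor shown elsewhere in this commit:

    #include "client/FileSystemImpl.h"
    #include "client/EncryptionZoneInfo.h"
    #include <stdint.h>
    #include <vector>

    // Collect every encryption zone by paging manually through the NameNode.
    std::vector<Hdfs::EncryptionZoneInfo> collectAllZones(Hdfs::Internal::FileSystemImpl & fs) {
        std::vector<Hdfs::EncryptionZoneInfo> zones;
        int64_t lastId = 0;                       // 0 requests the first batch
        bool more = true;

        while (more) {
            std::vector<Hdfs::EncryptionZoneInfo> batch;
            more = fs.listEncryptionZones(lastId, batch);
            if (batch.empty()) {
                break;                            // nothing returned, stop even if "more" was set
            }
            lastId = batch.back().getId();        // resume after the last zone we have seen
            zones.insert(zones.end(), batch.begin(), batch.end());
        }
        return zones;
    }
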
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/src/client/FileSystemImpl.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/FileSystemImpl.h b/depends/libhdfs3/src/client/FileSystemImpl.h
index bd590bd..1c11a61 100644
--- a/depends/libhdfs3/src/client/FileSystemImpl.h
+++ b/depends/libhdfs3/src/client/FileSystemImpl.h
@@ -24,10 +24,12 @@
 
 #include "BlockLocation.h"
 #include "DirectoryIterator.h"
+#include "EncryptionZoneIterator.h"
 #include "FileStatus.h"
 #include "FileSystemInter.h"
 #include "FileSystemKey.h"
 #include "FileSystemStats.h"
+#include "EncryptionZoneInfo.h"
 #include "Permission.h"
 #include "server/Namenode.h"
 #include "SessionConfig.h"
@@ -477,6 +479,44 @@ public:
         return *peerCache;
     }
 
+    /**
+     * Create an encryption zone for the directory with the specified key name.
+     * @param path the directory path on which the encryption zone is created.
+     * @param keyName the key name of the encryption zone.
+     * @return true on success.
+     */
+    bool createEncryptionZone(const char * path, const char * keyName);
+
+    /**
+     * Get the encryption zone information for a path.
+     * @param path the path for which encryption zone information is returned.
+     * @return the encryption zone information.
+     */
+    EncryptionZoneInfo getEZForPath(const char * path);
+
+    /**
+     * Get a partial listing of the indicated encryption zones
+     *
+     * @param id the index of encryption zones.
+     * @param ezl append the returned encryption zones.
+     * @return return true if there are more items.
+     */
+    bool listEncryptionZones(const int64_t id, std::vector<EncryptionZoneInfo> & ezl);
+
+    /**
+     * List the encryption zones.
+     * @return an iterator over all encryption zones.
+     */
+    EncryptionZoneIterator listEncryptionZone();
+
+
+    /**
+     * List all encryption zones.
+     * @return a vector of encryption zone information.
+     */
+    std::vector<EncryptionZoneInfo> listAllEncryptionZoneItems();
+
 private:
     Config conf;
     FileSystemKey key;

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/src/client/FileSystemInter.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/FileSystemInter.h b/depends/libhdfs3/src/client/FileSystemInter.h
index 352c37f..45df347 100644
--- a/depends/libhdfs3/src/client/FileSystemInter.h
+++ b/depends/libhdfs3/src/client/FileSystemInter.h
@@ -27,9 +27,11 @@
 
 #include "BlockLocation.h"
 #include "DirectoryIterator.h"
+#include "EncryptionZoneIterator.h"
 #include "FileStatus.h"
 #include "FileSystemKey.h"
 #include "FileSystemStats.h"
+#include "EncryptionZoneInfo.h"
 #include "PeerCache.h"
 #include "Permission.h"
 #include "server/LocatedBlocks.h"
@@ -481,6 +483,44 @@ public:
      * @return return the peer cache.
      */
     virtual PeerCache& getPeerCache() = 0;
+
+    /**
+     * Create an encryption zone for the directory with the specified key name.
+     * @param path the directory path on which the encryption zone is created.
+     * @param keyName the key name of the encryption zone.
+     * @return true on success.
+     */
+    virtual bool createEncryptionZone(const char * path, const char * keyName) = 0;
+
+    /**
+     * Get the encryption zone information for a path.
+     * @param path the path for which encryption zone information is returned.
+     * @return the encryption zone information.
+     */
+    virtual EncryptionZoneInfo getEZForPath(const char * path) = 0;
+
+    /**
+     * Get a partial listing of the indicated encryption zones
+     *
+     * @param id the index of encryption zones.
+     * @param ezl append the returned encryption zones.
+     * @return return true if there are more items.
+     */   
+    virtual bool listEncryptionZones(const int64_t id, std::vector<EncryptionZoneInfo> & ezl) = 0;
+
+    /**
+     * List the encryption zones.
+     * @return an iterator over all encryption zones.
+     */
+    virtual EncryptionZoneIterator listEncryptionZone() = 0;
+
+
+    /**
+     * List all encryption zones.
+     * @return a vector of encryption zone information.
+     */
+    virtual std::vector<EncryptionZoneInfo> listAllEncryptionZoneItems() = 0;
 };
 
 }

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/src/client/Hdfs.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/Hdfs.cpp b/depends/libhdfs3/src/client/Hdfs.cpp
index 395f4f8..2840adc 100644
--- a/depends/libhdfs3/src/client/Hdfs.cpp
+++ b/depends/libhdfs3/src/client/Hdfs.cpp
@@ -989,8 +989,31 @@ int hdfsSetReplication(hdfsFS fs, const char * path, int16_t replication) {
     return -1;
 }
 
+static void ConstructHdfsEncryptionZoneInfo(hdfsEncryptionZoneInfo * infoEn,
+                                  std::vector<Hdfs::EncryptionZoneInfo> & enStatus) {
+    size_t size = enStatus.size();
+
+    for (size_t i = 0; i < size; ++i) {
+        infoEn[i].mSuite = enStatus[i].getSuite();
+        infoEn[i].mCryptoProtocolVersion = enStatus[i].getCryptoProtocolVersion();
+        infoEn[i].mId = enStatus[i].getId();
+        infoEn[i].mPath = Strdup(enStatus[i].getPath());
+        infoEn[i].mKeyName = Strdup(enStatus[i].getKeyName());
+    }
+}
+
+static void ConstructHdfsEncryptionFileInfo(hdfsEncryptionFileInfo * infoEn,
+                                  Hdfs::FileEncryptionInfo* enStatus) {
+    infoEn->mSuite = enStatus->getSuite();
+    infoEn->mCryptoProtocolVersion = enStatus->getCryptoProtocolVersion();
+    /* Copy the strings: the FileStatus objects they come from are temporaries,
+     * so raw c_str() pointers would dangle once the caller's vector is destroyed. */
+    infoEn->mKey = Strdup(enStatus->getKey().c_str());
+    infoEn->mKeyName = Strdup(enStatus->getKeyName().c_str());
+    infoEn->mIv = Strdup(enStatus->getIv().c_str());
+    infoEn->mEzKeyVersionName = Strdup(enStatus->getEzKeyVersionName().c_str());
+}
+
 static void ConstructHdfsFileInfo(hdfsFileInfo * infos,
-                                  const std::vector<Hdfs::FileStatus> & status) {
+                                  std::vector<Hdfs::FileStatus> & status) {
     size_t size = status.size();
 
     for (size_t i = 0; i < size; ++i) {
@@ -1006,6 +1029,13 @@ static void ConstructHdfsFileInfo(hdfsFileInfo * infos,
         infos[i].mPermissions = status[i].getPermission().toShort();
         infos[i].mReplication = status[i].getReplication();
         infos[i].mSize = status[i].getLength();
+        infos[i].mHdfsEncryptionFileInfo = NULL;
+        if (status[i].isFileEncrypted()) {
+             infos[i].mHdfsEncryptionFileInfo = new hdfsEncryptionFileInfo[1];
+             memset(infos[i].mHdfsEncryptionFileInfo, 0, sizeof(hdfsEncryptionFileInfo));
+             ConstructHdfsEncryptionFileInfo(infos[i].mHdfsEncryptionFileInfo, status[i].getFileEncryption());
+             
+        }
     }
 }
 
@@ -1021,7 +1051,7 @@ hdfsFileInfo * hdfsListDirectory(hdfsFS fs, const char * path,
         size = status.size();
         retval = new hdfsFileInfo[size];
         memset(retval, 0, sizeof(hdfsFileInfo) * size);
-        ConstructHdfsFileInfo(&retval[0], status);
+        ConstructHdfsFileInfo(retval, status);
         *numEntries = size;
         return retval;
     } catch (const std::bad_alloc & e) {
@@ -1061,11 +1091,22 @@ hdfsFileInfo * hdfsGetPathInfo(hdfsFS fs, const char * path) {
     return NULL;
 }
 
+void hdfsFreeEncryptionZoneInfo(hdfsEncryptionZoneInfo * infos, int numEntries) {
+    for (int i = 0; infos != NULL && i < numEntries; ++i) {
+        delete [] infos[i].mPath;
+        delete [] infos[i].mKeyName;
+    }
+    delete[] infos;
+}
+
 void hdfsFreeFileInfo(hdfsFileInfo * infos, int numEntries) {
     for (int i = 0; infos != NULL && i < numEntries; ++i) {
         delete [] infos[i].mGroup;
         delete [] infos[i].mName;
         delete [] infos[i].mOwner;
+        if (infos[i].mHdfsEncryptionFileInfo != NULL) {
+            delete [] infos[i].mHdfsEncryptionFileInfo->mKey;
+            delete [] infos[i].mHdfsEncryptionFileInfo->mKeyName;
+            delete [] infos[i].mHdfsEncryptionFileInfo->mIv;
+            delete [] infos[i].mHdfsEncryptionFileInfo->mEzKeyVersionName;
+            delete [] infos[i].mHdfsEncryptionFileInfo;
+        }
     }
 
     delete[] infos;
@@ -1450,6 +1491,75 @@ void hdfsFreeFileBlockLocations(BlockLocation * locations, int numOfBlock) {
     delete [] locations;
 }
 
+int hdfsCreateEncryptionZone(hdfsFS fs, const char * path, const char * keyName) {
+    PARAMETER_ASSERT(fs && path && strlen(path) > 0 && keyName && strlen(keyName) > 0, -1, EINVAL);
+
+    try {
+        return fs->getFilesystem().createEncryptionZone(path, keyName) ? 0 : -1;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
+hdfsEncryptionZoneInfo * hdfsGetEZForPath(hdfsFS fs, const char * path) {
+    PARAMETER_ASSERT(fs && path && strlen(path) > 0, NULL, EINVAL);
+    hdfsEncryptionZoneInfo * retval = NULL;
+
+    try {
+        retval = new hdfsEncryptionZoneInfo[1];
+        memset(retval, 0, sizeof(hdfsEncryptionZoneInfo));
+        std::vector<Hdfs::EncryptionZoneInfo> enStatus(1);
+        enStatus[0] = fs->getFilesystem().getEZForPath(path);
+        ConstructHdfsEncryptionZoneInfo(retval, enStatus);
+        return retval;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        hdfsFreeEncryptionZoneInfo(retval, 1);
+        /* If out of memory error occurred, free hdfsEncryptionZoneInfo array's memory. */
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        hdfsFreeEncryptionZoneInfo(retval, 1);
+        /* If any exceptions throw out, free hdfsEncryptionZoneInfo array's memory. */
+        handleException(Hdfs::current_exception());
+    }
+
+    return NULL;
+}
+
+
+hdfsEncryptionZoneInfo * hdfsListEncryptionZones(hdfsFS fs, int * numEntries) {
+    PARAMETER_ASSERT(fs, NULL, EINVAL);
+    hdfsEncryptionZoneInfo * retval = NULL;
+    int size = 0;
+
+    try {
+        std::vector<Hdfs::EncryptionZoneInfo> enStatus =
+            fs->getFilesystem().listAllEncryptionZoneItems();
+        size = enStatus.size();
+        retval = new hdfsEncryptionZoneInfo[size];
+        memset(retval, 0, sizeof(hdfsEncryptionZoneInfo) * size);
+        ConstructHdfsEncryptionZoneInfo(&retval[0], enStatus);
+        *numEntries = size;
+        return retval;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        /* If out of memory error occurred, free hdfsEncryptionZoneInfo array's memory. */
+        hdfsFreeEncryptionZoneInfo(retval, size);
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        /* If any exceptions throw out, free hdfsEncryptionZoneInfo array's memory. */
+        hdfsFreeEncryptionZoneInfo(retval, size);
+        handleException(Hdfs::current_exception());
+    }
+    return NULL;
+}
 #ifdef __cplusplus
 }
 #endif

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/src/client/hdfs.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/hdfs.h b/depends/libhdfs3/src/client/hdfs.h
index 5d356f9..f8b61ea 100644
--- a/depends/libhdfs3/src/client/hdfs.h
+++ b/depends/libhdfs3/src/client/hdfs.h
@@ -482,6 +482,30 @@ int hdfsCreateDirectory(hdfsFS fs, const char * path);
 int hdfsSetReplication(hdfsFS fs, const char * path, int16_t replication);
 
 /**
+ * hdfsEncryptionZoneInfo - Information about an encryption zone.
+ */
+typedef struct {
+    int mSuite; /* the suite of encryption zone */
+    int mCryptoProtocolVersion; /* the version of crypto protocol */
+    int64_t mId; /* the id of encryption zone */
+    char * mPath; /* the path of encryption zone */
+    char * mKeyName; /* the key name of encryption zone */
+} hdfsEncryptionZoneInfo;
+
+
+/**
+ * hdfsEncryptionFileInfo - Encryption information for a file/directory.
+ */
+typedef struct {
+    int mSuite; /* the suite of encryption file/directory */
+    int mCryptoProtocolVersion; /* the version of crypto protocol */
+    char * mKey; /* the key of encryption file/directory */
+    char * mKeyName; /* the key name of encryption file/directory */
+    char * mIv; /* the iv of encryption file/directory */
+    char * mEzKeyVersionName; /* the encryption zone key version name of the file/directory */
+} hdfsEncryptionFileInfo;
+
+/**
  * hdfsFileInfo - Information about a file/directory.
  */
 typedef struct {
@@ -495,6 +519,7 @@ typedef struct {
     char * mGroup; /* the group associated with the file */
     short mPermissions; /* the permissions associated with the file */
     tTime mLastAccess; /* the last access time for the file in seconds */
+    hdfsEncryptionFileInfo * mHdfsEncryptionFileInfo; /* the encryption info of the file/directory */
 } hdfsFileInfo;
 
 /**
@@ -528,6 +553,15 @@ hdfsFileInfo * hdfsGetPathInfo(hdfsFS fs, const char * path);
 void hdfsFreeFileInfo(hdfsFileInfo * infos, int numEntries);
 
 /**
+ * hdfsFreeEncryptionZoneInfo - Free up the hdfsEncryptionZoneInfo array (including fields)
+ * @param infos The array of dynamically-allocated hdfsEncryptionZoneInfo
+ * objects.
+ * @param numEntries The size of the array.
+ */
+void hdfsFreeEncryptionZoneInfo(hdfsEncryptionZoneInfo * infos, int numEntries);
+
+
+/**
  * hdfsGetHosts - Get hostnames where a particular block (determined by
  * pos & blocksize) of a file is stored. The last element in the array
  * is NULL. Due to replication, a single block could be present on
@@ -723,6 +757,35 @@ BlockLocation * hdfsGetFileBlockLocations(hdfsFS fs, const char * path,
  */
 void hdfsFreeFileBlockLocations(BlockLocation * locations, int numOfBlock);
 
+/**
+ * hdfsCreateEncryptionZone - Create an encryption zone for the directory with the
+ * specified key name.
+ * @param fs The configured filesystem handle.
+ * @param path The path of the directory.
+ * @param keyName The key name of the encryption zone.
+ * @return Returns 0 on success, -1 on error.
+ */
+int hdfsCreateEncryptionZone(hdfsFS fs, const char * path, const char * keyName);
+
+/**
+ * hdfsGetEZForPath - Get the encryption zone of a path as a (dynamically
+ * allocated) single hdfsEncryptionZoneInfo struct. hdfsFreeEncryptionZoneInfo
+ * should be called when the pointer is no longer needed.
+ * @param fs The configured filesystem handle.
+ * @param path The path of the encryption zone.
+ * @return Returns a dynamically-allocated hdfsEncryptionZoneInfo object;
+ * NULL on error.
+ */
+hdfsEncryptionZoneInfo * hdfsGetEZForPath(hdfsFS fs, const char * path);
+
+/**
+ * hdfsListEncryptionZones - Get the list of all encryption zones.
+ * hdfsFreeEncryptionZoneInfo should be called to deallocate memory.
+ * @param fs The configured filesystem handle.
+ * @param numEntries Set to the number of encryption zones returned.
+ * @return Returns a dynamically-allocated array of hdfsEncryptionZoneInfo objects;
+ * NULL on error.
+ */
+hdfsEncryptionZoneInfo * hdfsListEncryptionZones(hdfsFS fs, int * numEntries);
+
 #ifdef __cplusplus
 }
 #endif

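For the C interface the intended flow is the same create/query/list/free sequence as the C++ API. The sketch below is not part of the commit and follows the style of the TestCInterface TDE test; the key name "mykey", the "default" connection, and the in-tree include path are placeholders.

    #include "client/hdfs.h"
    #include <stdio.h>

    int demoTde() {
        hdfsFS fs = hdfsConnect("default", 0);
        if (fs == NULL) {
            return -1;
        }

        hdfsCreateDirectory(fs, "/tde-demo");              /* zone root must exist and be empty */
        if (hdfsCreateEncryptionZone(fs, "/tde-demo", "mykey") != 0) {
            fprintf(stderr, "createEncryptionZone failed: %s\n", hdfsGetLastError());
        }

        hdfsEncryptionZoneInfo * ez = hdfsGetEZForPath(fs, "/tde-demo");
        if (ez != NULL) {
            printf("zone %s uses key %s\n", ez->mPath, ez->mKeyName);
            hdfsFreeEncryptionZoneInfo(ez, 1);             /* free the single returned entry */
        }

        int numZones = 0;
        hdfsEncryptionZoneInfo * zones = hdfsListEncryptionZones(fs, &numZones);
        for (int i = 0; zones != NULL && i < numZones; ++i) {
            printf("%s -> %s\n", zones[i].mPath, zones[i].mKeyName);
        }
        hdfsFreeEncryptionZoneInfo(zones, numZones);

        return hdfsDisconnect(fs);
    }
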
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/src/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/proto/ClientNamenodeProtocol.proto b/depends/libhdfs3/src/proto/ClientNamenodeProtocol.proto
index 5362246..1ab69cd 100644
--- a/depends/libhdfs3/src/proto/ClientNamenodeProtocol.proto
+++ b/depends/libhdfs3/src/proto/ClientNamenodeProtocol.proto
@@ -33,6 +33,7 @@ package Hdfs.Internal;
 
 import "hdfs.proto";
 import "Security.proto";
+import "encryption.proto";
 
 /**
  * The ClientNamenodeProtocol Service defines the interface between a client 
@@ -74,6 +75,7 @@ message CreateRequestProto {
   required bool createParent = 5;
   required uint32 replication = 6; // Short: Only 16 bits used
   required uint64 blockSize = 7;
+  repeated CryptoProtocolVersionProto cryptoProtocolVersion = 8;
 }
 
 message CreateResponseProto {
@@ -752,4 +754,10 @@ service ClientNamenodeProtocol {
       returns(GetSnapshotDiffReportResponseProto);
   rpc isFileClosed(IsFileClosedRequestProto)
       returns(IsFileClosedResponseProto);
+  rpc createEncryptionZone(CreateEncryptionZoneRequestProto)
+      returns(CreateEncryptionZoneResponseProto);
+  rpc listEncryptionZones(ListEncryptionZonesRequestProto)
+      returns(ListEncryptionZonesResponseProto);
+  rpc getEZForPath(GetEZForPathRequestProto)
+      returns(GetEZForPathResponseProto);
 }

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/src/proto/datatransfer.proto
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/proto/datatransfer.proto b/depends/libhdfs3/src/proto/datatransfer.proto
index 5d8013e..d787d8a 100644
--- a/depends/libhdfs3/src/proto/datatransfer.proto
+++ b/depends/libhdfs3/src/proto/datatransfer.proto
@@ -43,6 +43,7 @@ message DataTransferEncryptorMessageProto {
   required DataTransferEncryptorStatus status = 1;
   optional bytes payload = 2;
   optional string message = 3;
+  repeated CipherOptionProto cipherOption = 4;
 }
 
 message BaseHeaderProto {

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/src/proto/encryption.proto
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/proto/encryption.proto b/depends/libhdfs3/src/proto/encryption.proto
new file mode 100644
index 0000000..53206f8
--- /dev/null
+++ b/depends/libhdfs3/src/proto/encryption.proto
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * These .proto interfaces are private and stable.
+ * Please see http://wiki.apache.org/hadoop/Compatibility
+ * for what changes are allowed for a *stable* .proto interface.
+ */
+
+/** This file contains protocol buffers that are used throughout HDFS -- i.e.
+ *  by the client, server, and data transfer protocols.
+ */
+
+option java_package = "org.apache.hadoop.hdfs.protocol.proto";
+option java_outer_classname = "EncryptionZonesProtos";
+option java_generate_equals_and_hash = true;
+package Hdfs.Internal;
+
+import "hdfs.proto";
+
+message CreateEncryptionZoneRequestProto {
+  required string src = 1;
+  optional string keyName = 2;
+}
+
+message CreateEncryptionZoneResponseProto {
+}
+
+message ListEncryptionZonesRequestProto {
+  required int64 id = 1;
+}
+
+message EncryptionZoneProto {
+  required int64 id = 1;
+  required string path = 2;
+  required CipherSuiteProto suite = 3;
+  required CryptoProtocolVersionProto cryptoProtocolVersion = 4;
+  required string keyName = 5;
+}
+
+message ListEncryptionZonesResponseProto {
+  repeated EncryptionZoneProto zones = 1;
+  required bool hasMore = 2;
+}
+
+message GetEZForPathRequestProto {
+    required string src = 1;
+}
+
+message GetEZForPathResponseProto {
+    optional EncryptionZoneProto zone = 1;
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/src/proto/hdfs.proto
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/proto/hdfs.proto b/depends/libhdfs3/src/proto/hdfs.proto
index 19e3f79..d85f217 100644
--- a/depends/libhdfs3/src/proto/hdfs.proto
+++ b/depends/libhdfs3/src/proto/hdfs.proto
@@ -160,6 +160,64 @@ message DataEncryptionKeyProto {
   optional string encryptionAlgorithm = 6;
 }
 
+/**
+ * Cipher suite.
+ */
+enum CipherSuiteProto {
+    UNKNOWN = 1;
+    AES_CTR_NOPADDING = 2;
+}
+
+/**
+ * Crypto protocol version used to access encrypted files.
+ */
+enum CryptoProtocolVersionProto {
+    UNKNOWN_PROTOCOL_VERSION = 1;
+    ENCRYPTION_ZONES = 2;
+}
+
+/**
+ * Encryption information for a file.
+ */
+message FileEncryptionInfoProto {
+  required CipherSuiteProto suite = 1;
+  required CryptoProtocolVersionProto cryptoProtocolVersion = 2;
+  required bytes key = 3;
+  required bytes iv = 4;
+  required string keyName = 5;
+  required string ezKeyVersionName = 6;
+}
+
+/**
+ * Encryption information for an individual
+ * file within an encryption zone
+ */
+message PerFileEncryptionInfoProto {
+  required bytes key = 1;
+  required bytes iv = 2;
+  required string ezKeyVersionName = 3;
+}
+
+/**
+ * Encryption information for an encryption
+ * zone
+ */
+message ZoneEncryptionInfoProto {
+  required CipherSuiteProto suite = 1;
+  required CryptoProtocolVersionProto cryptoProtocolVersion = 2;
+  required string keyName = 3;
+}
+
+/**
+ * Cipher option
+ */
+message CipherOptionProto {
+  required CipherSuiteProto suite = 1;
+  optional bytes inKey = 2;
+  optional bytes inIv = 3;
+  optional bytes outKey = 4;
+  optional bytes outIv = 5;
+}
 
 /**
  * A set of file blocks and their locations.
@@ -203,6 +261,8 @@ message HdfsFileStatusProto {
   // Optional field for fileId
   optional uint64 fileId = 13 [default = 0]; // default as an invalid id
   optional int32 childrenNum = 14 [default = -1];
+  // Optional field for file encryption
+  optional FileEncryptionInfoProto fileEncryptionInfo = 15;
 } 
 
 /**

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/src/rpc/RpcAuth.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/rpc/RpcAuth.h b/depends/libhdfs3/src/rpc/RpcAuth.h
index df503bb..5075f08 100644
--- a/depends/libhdfs3/src/rpc/RpcAuth.h
+++ b/depends/libhdfs3/src/rpc/RpcAuth.h
@@ -33,7 +33,7 @@ namespace Internal {
 enum AuthMethod {
     SIMPLE = 80, KERBEROS = 81, //"GSSAPI"
     TOKEN = 82, //"DIGEST-MD5"
-    UNKNOWN = 255
+    UNSURENESS = 255
 };
 
 enum AuthProtocol {

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/src/rpc/RpcChannel.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/rpc/RpcChannel.cpp b/depends/libhdfs3/src/rpc/RpcChannel.cpp
index 974e86c..7f9ef5d 100644
--- a/depends/libhdfs3/src/rpc/RpcChannel.cpp
+++ b/depends/libhdfs3/src/rpc/RpcChannel.cpp
@@ -121,7 +121,7 @@ const RpcSaslProto_SaslAuth * RpcChannelImpl::createSaslClient(
             break;
         } else if (method.getMethod() == AuthMethod::SIMPLE) {
             return auth;
-        } else if (method.getMethod() == AuthMethod::UNKNOWN) {
+        } else if (method.getMethod() == AuthMethod::UNSURENESS) {
             return auth;
         } else {
             auth = NULL;
@@ -187,7 +187,7 @@ RpcAuth RpcChannelImpl::setupSaslConnection() {
 
             if (retval.getMethod() == AuthMethod::SIMPLE) {
                 done = true;
-            } else if (retval.getMethod() == AuthMethod::UNKNOWN) {
+            } else if (retval.getMethod() == AuthMethod::UNSURENESS) {
                 THROW(AccessControlException, "Unknown auth mechanism");
             } else {
                 std::string respToken;

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/src/server/Namenode.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/server/Namenode.h b/depends/libhdfs3/src/server/Namenode.h
index 9577b6d..ad213e8 100644
--- a/depends/libhdfs3/src/server/Namenode.h
+++ b/depends/libhdfs3/src/server/Namenode.h
@@ -23,6 +23,7 @@
 #define _HDFS_LIBHDFS3_SERVER_NAMENODE_H_
 
 #include "client/FileStatus.h"
+#include "client/EncryptionZoneInfo.h"
 #include "client/Permission.h"
 #include "DatanodeInfo.h"
 #include "Exception.h"
@@ -809,8 +810,40 @@ public:
      * close the namenode connection.
      */
     virtual void close() {};
-};
 
+    /**
+     * Create an encryption zone for the directory with the specified key name.
+     * @param src the directory path on which the encryption zone is created.
+     * @param keyName the key name of the encryption zone.
+     * @return true on success.
+     * @throw HdfsIOException If an I/O error occurred
+     */
+    virtual bool createEncryptionZone(const std::string & src, const std::string & keyName) = 0;
+
+    /**
+     * Get the encryption zone information for a path.
+     * @param src the path for which encryption zone information is returned.
+     * @param exist if non-NULL, set to whether the path belongs to an encryption zone.
+     * @return the encryption zone information.
+     * @throw FileNotFoundException If file <code>src</code> does not exist
+     * @throw UnresolvedLinkException If <code>src</code> contains a symlink
+     * @throw HdfsIOException If an I/O error occurred
+     */
+    virtual EncryptionZoneInfo getEncryptionZoneInfo(const std::string & src, bool *exist) = 0; 
+
+    /**
+     * Get a partial listing of the indicated encryption zones
+     *
+     * @param id the index of encryption zone
+     * @param ezl append the returned encryption zones.
+     *
+     * @throw AccessControlException permission denied
+     * @throw UnresolvedLinkException If <code>src</code> contains a symlink
+     * @throw HdfsIOException If an I/O error occurred
+     */
+    virtual bool listEncryptionZones(const int64_t id, std::vector<EncryptionZoneInfo> & ezl) 
+              /* throw (AccessControlException, UnresolvedLinkException, HdfsIOException) */ = 0;
+
+};
 }
 }
 

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/src/server/NamenodeImpl.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/server/NamenodeImpl.cpp b/depends/libhdfs3/src/server/NamenodeImpl.cpp
index ae55e9d..958f6b1 100644
--- a/depends/libhdfs3/src/server/NamenodeImpl.cpp
+++ b/depends/libhdfs3/src/server/NamenodeImpl.cpp
@@ -85,6 +85,7 @@ void NamenodeImpl::create(const std::string & src, const Permission & masked,
          FileAlreadyExistsException, FileNotFoundException,
          NSQuotaExceededException, ParentNotDirectoryException,
           UnresolvedLinkException, HdfsIOException) */{
+
     try {
         CreateRequestProto request;
         CreateResponseProto response;
@@ -94,6 +95,7 @@ void NamenodeImpl::create(const std::string & src, const Permission & masked,
         request.set_createparent(createParent);
         request.set_replication(replication);
         request.set_src(src);
+        request.add_cryptoprotocolversion(CryptoProtocolVersionProto::ENCRYPTION_ZONES);
         Build(masked, request.mutable_masked());
         invoke(RpcCall(false, "create", &request, &response));
     } catch (const HdfsRpcServerException & e) {
@@ -792,5 +794,76 @@ void NamenodeImpl::cancelDelegationToken(const Token & token) {
     }
 }
 
+bool NamenodeImpl::createEncryptionZone(const std::string & src, const std::string & keyName) {
+    try {
+        CreateEncryptionZoneRequestProto request;
+        CreateEncryptionZoneResponseProto response;
+        request.set_src(src);
+        request.set_keyname(keyName);
+        invoke(RpcCall(true, "createEncryptionZone",&request, &response));
+        return true;
+    } catch (const HdfsRpcServerException & e) {
+        UnWrapper < HdfsIOException > unwrapper(e);
+        unwrapper.unwrap(__FILE__, __LINE__);
+    }
+
+    return false;
+}
+
+EncryptionZoneInfo NamenodeImpl::getEncryptionZoneInfo(const std::string & src, bool *exist)
+/* throw (FileNotFoundException,
+ UnresolvedLinkException, HdfsIOException) */{
+    EncryptionZoneInfo retval;
+
+    try {
+        GetEZForPathRequestProto request;
+        GetEZForPathResponseProto response;
+        request.set_src(src);
+        invoke(RpcCall(true, "getEZForPath", &request, &response));
+
+        if (response.has_zone()) {
+            Convert(retval, response.zone());
+            retval.setPath(src.c_str());
+
+            if (exist) {
+                *exist = true;
+            }
+
+            return retval;
+        }
+
+        if (!exist) {
+            THROW(FileNotFoundException, "Path %s does not exist.", src.c_str());
+        }
+
+        *exist = false;
+    } catch (const HdfsRpcServerException & e) {
+        UnWrapper < FileNotFoundException,
+                  UnresolvedLinkException, HdfsIOException > unwrapper(e);
+        unwrapper.unwrap(__FILE__, __LINE__);
+    }
+
+    return retval;
+}
+
+//Idempotent
+bool NamenodeImpl::listEncryptionZones(const int64_t id, std::vector<EncryptionZoneInfo> & ezl) 
+                                      /* throw (AccessControlException,FileNotFoundException, UnresolvedLinkException, HdfsIOException) */{
+    try {
+        ListEncryptionZonesRequestProto request;
+        ListEncryptionZonesResponseProto response;
+        request.set_id(id);
+        invoke(RpcCall(true, "listEncryptionZones", &request, &response));
+
+        if (response.zones_size() != 0) {
+            Convert(ezl, response);
+            return response.hasmore();
+        }
+
+    } catch (const HdfsRpcServerException & e) {
+        UnWrapper < FileNotFoundException,
+                  UnresolvedLinkException, HdfsIOException > unwrapper(e);
+        unwrapper.unwrap(__FILE__, __LINE__);
+    }
+
+    return false;
+}
+
 }
 }

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/src/server/NamenodeImpl.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/server/NamenodeImpl.h b/depends/libhdfs3/src/server/NamenodeImpl.h
index 2d915da..1abe6d9 100644
--- a/depends/libhdfs3/src/server/NamenodeImpl.h
+++ b/depends/libhdfs3/src/server/NamenodeImpl.h
@@ -218,6 +218,15 @@ public:
     void cancelDelegationToken(const Token & token)
     /*throws IOException*/;
 
+    bool createEncryptionZone(const std::string & src, const std::string & keyName);
+    /* throws HdfsIOException If an I/O error occurred */
+
+    EncryptionZoneInfo getEncryptionZoneInfo(const std::string & src, bool *exist);
+    /* throw (FileNotFoundException, UnresolvedLinkException, HdfsIOException) */     
+    bool listEncryptionZones(const int64_t id, std::vector<EncryptionZoneInfo> & ezl);
+    /* throw (AccessControlException, UnresolvedLinkException, HdfsIOException) */ 
+
+
 private:
     void invoke(const RpcCall & call);
 

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/src/server/NamenodeProxy.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/server/NamenodeProxy.cpp b/depends/libhdfs3/src/server/NamenodeProxy.cpp
index 893b44f..81581ef 100644
--- a/depends/libhdfs3/src/server/NamenodeProxy.cpp
+++ b/depends/libhdfs3/src/server/NamenodeProxy.cpp
@@ -524,5 +524,29 @@ void NamenodeProxy::close() {
     namenodes.clear();
 }
 
+bool NamenodeProxy::createEncryptionZone(const std::string & src, const std::string & keyName) {
+    NAMENODE_HA_RETRY_BEGIN();
+    return namenode->createEncryptionZone(src, keyName);
+    NAMENODE_HA_RETRY_END();
+    assert(!"should not reach here");
+    return false;
+}
+
+EncryptionZoneInfo NamenodeProxy::getEncryptionZoneInfo(const std::string & src, bool *exist) {
+    NAMENODE_HA_RETRY_BEGIN();
+    return namenode->getEncryptionZoneInfo(src, exist);
+    NAMENODE_HA_RETRY_END();
+    assert(!"should not reach here");
+    return EncryptionZoneInfo();
+}
+
+bool NamenodeProxy::listEncryptionZones(const int64_t id, std::vector<EncryptionZoneInfo> & ezl) {
+    NAMENODE_HA_RETRY_BEGIN();
+    return namenode->listEncryptionZones(id, ezl);
+    NAMENODE_HA_RETRY_END();
+    assert(!"should not reach here");
+    return false;
+}
+
 }
 }

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/src/server/NamenodeProxy.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/server/NamenodeProxy.h b/depends/libhdfs3/src/server/NamenodeProxy.h
index 7d22713..c5b5142 100644
--- a/depends/libhdfs3/src/server/NamenodeProxy.h
+++ b/depends/libhdfs3/src/server/NamenodeProxy.h
@@ -139,6 +139,13 @@ public:
 
     void close();
 
+    bool createEncryptionZone(const std::string & path, const std::string & keyName);
+
+    EncryptionZoneInfo getEncryptionZoneInfo(const std::string & src, bool *exist);
+
+    bool listEncryptionZones(const int64_t id, std::vector<EncryptionZoneInfo> & ezl); 
+
+
 private:
     shared_ptr<Namenode> getActiveNamenode(uint32_t & oldValue);
     void failoverToNextNamenode(uint32_t oldValue);

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/src/server/RpcHelper.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/server/RpcHelper.h b/depends/libhdfs3/src/server/RpcHelper.h
index c6d9a4e..571ffc0 100644
--- a/depends/libhdfs3/src/server/RpcHelper.h
+++ b/depends/libhdfs3/src/server/RpcHelper.h
@@ -23,6 +23,7 @@
 #define _HDFS_LIBHDFS3_SERVER_RPCHELPER_H_
 
 #include "client/FileStatus.h"
+#include "client/EncryptionZoneInfo.h"
 #include "client/Permission.h"
 #include "ClientDatanodeProtocol.pb.h"
 #include "ClientNamenodeProtocol.pb.h"
@@ -182,6 +183,36 @@ static inline void Convert(const std::string & src, FileStatus & fs,
     fs.setSymlink(proto.symlink().c_str());
     fs.setPermission(Permission(proto.permission().perm()));
     fs.setIsdir(proto.filetype() == HdfsFileStatusProto::IS_DIR);
+
+    if (proto.has_fileencryptioninfo()){
+        const FileEncryptionInfoProto &encrypt = proto.fileencryptioninfo();
+        FileEncryptionInfo* convert = fs.getFileEncryption();
+        convert->setSuite(encrypt.suite());
+        convert->setCryptoProtocolVersion(encrypt.cryptoprotocolversion());
+        convert->setKey(encrypt.key());
+        convert->setKeyName(encrypt.keyname());
+        convert->setIv(encrypt.iv());
+        convert->setEzKeyVersionName(encrypt.ezkeyversionname()); 
+    }
+}
+
+static inline void Convert(EncryptionZoneInfo & enZone,
+                           const EncryptionZoneProto & proto) {
+    enZone.setSuite(proto.suite());
+    enZone.setCryptoProtocolVersion(proto.cryptoprotocolversion());
+    enZone.setId(proto.id());
+    enZone.setPath(proto.path().c_str());
+    enZone.setKeyName(proto.keyname().c_str());
+}
+
+static inline void Convert(std::vector<EncryptionZoneInfo> & ezl,
+                           const ListEncryptionZonesResponseProto & proto) {
+    const RepeatedPtrField<EncryptionZoneProto> & ptrproto = proto.zones();
+    for (int i = 0; i < ptrproto.size(); ++i) {
+        EncryptionZoneInfo enZoneInfo;
+        Convert(enZoneInfo, ptrproto.Get(i));
+        ezl.push_back(enZoneInfo);
+    }
 }
 
 static inline void Convert(const std::string & src,

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/test/function/TestCInterface.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/test/function/TestCInterface.cpp b/depends/libhdfs3/test/function/TestCInterface.cpp
index 2b81067..e45aaee 100644
--- a/depends/libhdfs3/test/function/TestCInterface.cpp
+++ b/depends/libhdfs3/test/function/TestCInterface.cpp
@@ -30,6 +30,9 @@
 #include <fcntl.h>
 #include <stdlib.h>
 #include <limits>
+#include <stdlib.h>
+#include <sstream>
+#include <iostream>
 
 using namespace Hdfs::Internal;
 
@@ -198,6 +201,45 @@ TEST(TestCInterfaceConnect, TestConnect_Success) {
     ASSERT_EQ(hdfsDisconnect(fs), 0);
 }
 
+TEST(TestCInterfaceTDE, DISABLED_TestCreateEnRPC_Success) {
+    hdfsFS fs = NULL;
+    hdfsEncryptionZoneInfo * enInfo = NULL;
+    char * uri = NULL;
+    setenv("LIBHDFS3_CONF", "function-test.xml", 1);
+    struct hdfsBuilder * bld = hdfsNewBuilder();
+    assert(bld != NULL);
+    hdfsBuilderSetNameNode(bld, "default");
+    fs = hdfsBuilderConnect(bld);
+    ASSERT_TRUE(fs != NULL);
+    system("hadoop fs -rmr /TDE");
+    system("hadoop key create keytde");
+    system("hadoop fs -mkdir /TDE");
+    ASSERT_EQ(0, hdfsCreateEncryptionZone(fs, "/TDE", "keytde")); 
+    enInfo = hdfsGetEZForPath(fs, "/TDE");
+    ASSERT_TRUE(enInfo != NULL);
+    EXPECT_TRUE(enInfo->mKeyName != NULL);
+    std::cout << "----hdfsEncryptionZoneInfo----:" << " KeyName : " << enInfo->mKeyName << " Suite : " << enInfo->mSuite << " CryptoProtocolVersion : " << enInfo->mCryptoProtocolVersion << " Id : " << enInfo->mId << " Path : " << enInfo->mPath << std::endl;
+    hdfsFreeEncryptionZoneInfo(enInfo, 1);
+    for (int i = 0; i <= 201; i++){
+        std::stringstream newstr;
+        newstr << i;
+        std::string tde = "/TDE" + newstr.str();
+        std::string key = "keytde" + newstr.str();
+        std::string rmTde = "hadoop fs -rmr /TDE" + newstr.str();
+        std::string tdeKey = "hadoop key create keytde" + newstr.str();
+        std::string mkTde = "hadoop fs -mkdir /TDE" + newstr.str();
+        system(rmTde.c_str());
+        system(tdeKey.c_str());
+        system(mkTde.c_str());
+        ASSERT_EQ(0, hdfsCreateEncryptionZone(fs, tde.c_str(), key.c_str()));
+    } 
+    hdfsEncryptionZoneInfo * enZoneInfos = NULL;
+    int num = 0;
+    enZoneInfos = hdfsListEncryptionZones(fs, &num);
+    ASSERT_TRUE(enZoneInfos != NULL);
+    EXPECT_EQ(num, 203);
+    hdfsFreeEncryptionZoneInfo(enZoneInfos, num);
+    ASSERT_EQ(hdfsDisconnect(fs), 0);
+    hdfsFreeBuilder(bld);
+}
 
 TEST(TestErrorMessage, TestErrorMessage) {
     EXPECT_NO_THROW(hdfsGetLastError());

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/test/function/TestFileSystem.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/test/function/TestFileSystem.cpp b/depends/libhdfs3/test/function/TestFileSystem.cpp
index 3191adb..b9d3f6b 100644
--- a/depends/libhdfs3/test/function/TestFileSystem.cpp
+++ b/depends/libhdfs3/test/function/TestFileSystem.cpp
@@ -147,6 +147,39 @@ TEST_F(TestFileSystem, listDirectory) {
     ASSERT_THROW(it.getNext(), HdfsIOException);
 }
 
+TEST_F(TestFileSystem, DISABLED_listEncryptionZone) {
+    fs->disconnect();
+    fs->connect();
+    const int dirs = 201;
+
+    for (int i = 0; i < dirs; i++){
+        std::stringstream newstr;
+        newstr << i;
+        std::string tde = "/TDE" + newstr.str();
+        std::string key = "keytde" + newstr.str();
+        std::string rmTde = "hadoop fs -rmr /TDE" + newstr.str();
+        std::string tdeKey = "hadoop key create keytde" + newstr.str();
+        std::string mkTde = "hadoop fs -mkdir /TDE" + newstr.str();
+        std::string tdeZone = "hdfs crypto -createZone -keyName " + key + " -path " + tde;
+        system(rmTde.c_str());
+        system(tdeKey.c_str());
+        system(mkTde.c_str());
+        system(tdeZone.c_str());
+    }
+
+    EncryptionZoneIterator it;
+    EXPECT_NO_THROW(it = fs->listEncryptionZone());
+    int count = 0;
+
+    while (it.hasNext()) {
+        count ++;
+        it.getNext();
+    }
+
+    ASSERT_EQ(dirs, count);
+    ASSERT_THROW(it.getNext(), HdfsIOException);
+}
+
 TEST_F(TestFileSystem, setOwner) {
     fs->disconnect();
     ASSERT_THROW(fs->setOwner(BASE_DIR, "setOwner", ""), HdfsIOException);

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/517e6d26/depends/libhdfs3/test/function/TestOutputStream.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/test/function/TestOutputStream.cpp b/depends/libhdfs3/test/function/TestOutputStream.cpp
index faf84e7..e57df34 100644
--- a/depends/libhdfs3/test/function/TestOutputStream.cpp
+++ b/depends/libhdfs3/test/function/TestOutputStream.cpp
@@ -517,6 +517,18 @@ TEST_F(TestOutputStream, TestOpenFileForWrite) {
 }
 
 
+TEST_F(TestOutputStream, DISABLED_TestOpenFileForWriteTDE) {
+    conf.set("output.default.packetsize", 1024);
+    fs = new FileSystem(conf);
+    fs->connect();
+    fs->mkdirs("/testTDE", 0755);
+    system("hadoop key create amy");
+    system("hdfs crypto -createZone -keyName amy -path /testTDE");
+    OutputStream other;
+    ASSERT_NO_THROW(other.open(*fs, "/testTDE/amy", Create | Append));
+    other.close();
+    fs->disconnect();
+}
 
 TEST_F(TestOutputStream, TestWriteChunkPacket) {
     //test create a file and write a block


[30/50] [abbrv] incubator-hawq git commit: HAWQ-1203. Ranger Plugin Service Implementation. (with contributions by Lav Jain and Leslie Chang) (close #1092)

Posted by es...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/pom.xml
----------------------------------------------------------------------
diff --git a/ranger-plugin/pom.xml b/ranger-plugin/pom.xml
new file mode 100644
index 0000000..20d3112
--- /dev/null
+++ b/ranger-plugin/pom.xml
@@ -0,0 +1,248 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>org.apache.hawq</groupId>
+    <artifactId>ranger-plugin</artifactId>
+    <version>2.1.0.0</version>
+    <packaging>pom</packaging>
+    <name>HAWQ Ranger Plugin</name>
+    <description>HAWQ Ranger Plugin</description>
+
+    <modules>
+        <module>admin-plugin</module>
+        <module>service</module>
+    </modules>
+
+    <properties>
+        <jackson.version>1.9</jackson.version>
+        <release.version>1</release.version>
+        <postgresql.version>9.1-901-1.jdbc4</postgresql.version>
+        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    </properties>
+
+    <build>
+        <testResources>
+            <testResource>
+                <directory>src/test/resources</directory>
+                <includes>
+                    <include>**/*</include>
+                </includes>
+                <filtering>true</filtering>
+            </testResource>
+        </testResources>
+        <plugins>
+            <plugin>
+                <groupId>org.codehaus.mojo</groupId>
+                <artifactId>rpm-maven-plugin</artifactId>
+                <version>2.0.1</version>
+                <inherited>false</inherited>
+                <executions>
+                    <execution>
+                        <inherited>false</inherited>
+                        <phase>install</phase>
+                        <goals>
+                            <goal>rpm</goal>
+                        </goals>
+                    </execution>
+                </executions>
+
+                <configuration>
+                    <copyright>ASL 2.0</copyright>
+                    <group>org.apache.hawq.ranger</group>
+                    <defaultUsername>gpadmin</defaultUsername>
+                    <defaultGroupname>gpadmin</defaultGroupname>
+                    <description>
+                        HAWQ Ranger plugin.
+                    </description>
+                    <release>${release.version}</release>
+                    <requires>
+                        <require>bigtop-tomcat</require>
+                        <require>hawq_${hawq.name.version} &gt;= ${project.version}</require>
+                    </requires>
+                    <mappings>
+                        <mapping>
+                            <directory>/usr/local/hawq_${hawq.name.version}/ranger/bin</directory>
+                            <filemode>755</filemode>
+                            <sources>
+                                <source>
+                                    <location>scripts</location>
+                                </source>
+                            </sources>
+                        </mapping>
+                        <mapping>
+                            <directory>/usr/local/hawq_${hawq.name.version}/ranger/lib</directory>
+                            <sources>
+                                <source>
+                                    <location>admin-plugin/target/ranger-plugin-admin-${project.version}.jar</location>
+                                </source>
+                                <source>
+                                    <location>admin-plugin/target/lib/postgresql-${postgresql.version}.jar</location>
+                                </source>
+                            </sources>
+                        </mapping>
+                        <mapping>
+                            <directory>/usr/local/hawq_${hawq.name.version}/ranger/etc</directory>
+                            <sources>
+                                <source>
+                                    <location>conf/ranger-servicedef-hawq.json</location>
+                                </source>
+                            </sources>
+                        </mapping>
+                        <mapping>
+                            <directory>/usr/local/hawq_${hawq.name.version}/ranger/plugin-service</directory>
+                        </mapping>
+                        <mapping>
+                            <directory>/usr/local/hawq_${hawq.name.version}/ranger/plugin-service/bin</directory>
+                        </mapping>
+                        <mapping>
+                            <directory>/usr/local/hawq_${hawq.name.version}/ranger/plugin-service/logs</directory>
+                        </mapping>
+                        <mapping>
+                            <directory>/usr/local/hawq_${hawq.name.version}/ranger/plugin-service/temp</directory>
+                        </mapping>
+                        <mapping>
+                            <directory>/usr/local/hawq_${hawq.name.version}/ranger/plugin-service/work</directory>
+                        </mapping>
+                        <mapping>
+                            <directory>/usr/local/hawq_${hawq.name.version}/ranger/plugin-service/webapps</directory>
+                            <sources>
+                                <source>
+                                    <location>service/target/ranger-plugin-service-${project.version}.war</location>
+                                    <destination>rps.war</destination>
+                                </source>
+                            </sources>
+                        </mapping>
+                        <mapping>
+                            <directory>/usr/local/hawq_${hawq.name.version}/ranger/plugin-service/conf</directory>
+                            <sources>
+                                <source>
+                                    <location>conf/tomcat-server.xml</location>
+                                </source>
+                            </sources>
+                        </mapping>
+                    </mappings>
+                </configuration>
+            </plugin>
+            <plugin>
+                <groupId>org.codehaus.mojo</groupId>
+                <artifactId>build-helper-maven-plugin</artifactId>
+                <version>1.7</version>
+                <executions>
+                    <execution>
+                        <id>regex-property</id>
+                        <goals>
+                            <goal>regex-property</goal>
+                        </goals>
+                        <configuration>
+                            <name>hawq.name.version</name>
+                            <value>${project.version}</value>
+                            <regex>^([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)?$</regex>
+                            <replacement>$1_$2_$3_$4</replacement>
+                            <failIfNoMatch>true</failIfNoMatch>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <version>3.1</version>
+                <configuration>
+                    <source>1.7</source>
+                    <target>1.7</target>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+
+    <dependencyManagement>
+        <dependencies>
+            <dependency>
+                <groupId>org.apache.ranger</groupId>
+                <artifactId>ranger-plugins-common</artifactId>
+                <version>0.6.0</version>
+            </dependency>
+            <dependency>
+                <groupId>log4j</groupId>
+                <artifactId>log4j</artifactId>
+                <version>1.2.17</version>
+            </dependency>
+            <dependency>
+                <groupId>commons-logging</groupId>
+                <artifactId>commons-logging</artifactId>
+                <version>1.2</version>
+            </dependency>
+            <dependency>
+                <groupId>postgresql</groupId>
+                <artifactId>postgresql</artifactId>
+                <version>${postgresql.version}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.codehaus.jackson</groupId>
+                <artifactId>jackson-xc</artifactId>
+                <version>1.9.13</version>
+            </dependency>
+            <dependency>
+                <groupId>com.google.guava</groupId>
+                <artifactId>guava</artifactId>
+                <version>11.0.2</version>
+            </dependency>
+            <dependency>
+                <groupId>javax.servlet</groupId>
+                <artifactId>servlet-api</artifactId>
+                <version>2.5</version>
+                <scope>provided</scope>
+            </dependency>
+            <dependency>
+                <groupId>javax.servlet.jsp</groupId>
+                <artifactId>jsp-api</artifactId>
+                <version>2.1</version>
+                <scope>provided</scope>
+            </dependency>
+            <dependency>
+                <groupId>junit</groupId>
+                <artifactId>junit</artifactId>
+                <version>4.12</version>
+                <scope>test</scope>
+            </dependency>
+            <dependency>
+                <groupId>org.mockito</groupId>
+                <artifactId>mockito-core</artifactId>
+                <version>1.10.19</version>
+                <scope>test</scope>
+            </dependency>
+            <dependency>
+                <groupId>org.powermock</groupId>
+                <artifactId>powermock-module-junit4</artifactId>
+                <version>1.6.5</version>
+                <scope>test</scope>
+            </dependency>
+            <dependency>
+                <groupId>org.powermock</groupId>
+                <artifactId>powermock-api-mockito</artifactId>
+                <version>1.6.5</version>
+                <scope>test</scope>
+            </dependency>
+        </dependencies>
+    </dependencyManagement>
+
+</project>
\ No newline at end of file

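For reference, the build-helper regex-property execution above derives hawq.name.version from the four-part project version so that the RPM mappings install under a versioned prefix. A rough shell equivalent of that transformation, assuming the 2.1.0.0 version declared by the service module below, would be:

    echo "2.1.0.0" | sed -E 's/^([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)$/\1_\2_\3_\4/'
    # prints 2_1_0_0, giving install paths such as /usr/local/hawq_2_1_0_0/ranger/lib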
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/scripts/register_hawq.sh
----------------------------------------------------------------------
diff --git a/ranger-plugin/scripts/register_hawq.sh b/ranger-plugin/scripts/register_hawq.sh
new file mode 100755
index 0000000..11e2df8
--- /dev/null
+++ b/ranger-plugin/scripts/register_hawq.sh
@@ -0,0 +1,217 @@
+#!/usr/bin/env bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+function usage() {
+  echo "USAGE: register_hawq.sh -r ranger_host:ranger_port -u ranger_user -p ranger_password -h hawq_host:hawq_port -w hawq_user -q hawq_password"
+  exit 1
+}
+
+function fail() {
+  echo "ERROR: $1"
+  exit 1
+}
+
+function mask() {
+  printf -v stars '%*s' ${#1} ''
+  echo "[${stars// /*}]"
+}
+
+function read_value() {
+  local input
+  read -p "Enter value for $1 : " input
+  echo $input
+}
+
+function read_password() {
+  local input
+  read -s -p "Enter value for $1 : " input
+  echo $input
+}
+
+function get_ranger_url() {
+  while [[ -z "$RANGER_URL" ]]
+  do
+    RANGER_URL=$(read_value "Ranger Admin host and port (e.g. abc.com:6080)")
+  done
+  local prefix="http://"
+  RANGER_URL=${RANGER_URL#$prefix}
+}
+
+function get_ranger_user() {
+  while [[ -z "$RANGER_USER" ]]
+  do
+    RANGER_USER=$(read_value "Ranger Admin user name")
+  done
+}
+
+function get_ranger_password() {
+  while [[ -z "$RANGER_PASSWORD" ]]
+  do
+    RANGER_PASSWORD=$(read_password "Ranger Admin password")
+    echo
+  done
+}
+
+function get_hawq_url() {
+  # TODO: read the default from hawq-site.xml?
+  local default="$(hostname -f):5432"
+  while [[ -z "$HAWQ_URL" ]]
+  do
+    HAWQ_URL=$(read_value "HAWQ Master host and port [${default}]")
+    HAWQ_URL=${HAWQ_URL:-$default}   # fall back to the advertised default on empty input
+  done
+  local prefix="http://"
+  HAWQ_URL=${HAWQ_URL#$prefix}
+  local parts=(${HAWQ_URL//:/ })
+  if [ ${#parts[@]} != 2 ]; then
+    fail "Incorrect value for HAWQ Master host and port."
+  fi
+  HAWQ_HOST=${parts[0]}
+  HAWQ_PORT=${parts[1]}
+}
+
+function get_hawq_user() {
+  local default="gpadmin"
+  while [[ -z "$HAWQ_USER" ]]
+  do
+    HAWQ_USER=$(read_value "HAWQ user name [${default}]")
+    HAWQ_USER=${HAWQ_USER:-$default}   # fall back to the advertised default on empty input
+  done
+}
+
+function get_hawq_password() {
+  while [[ -z "$HAWQ_PASSWORD" ]]
+  do
+    HAWQ_PASSWORD=$(read_password "HAWQ password")
+    echo
+  done
+}
+
+function parse_params() {
+  while [[ $# -gt 0 ]] 
+  do
+    key="$1"
+    case $key in
+      -r)
+        RANGER_URL="$2"
+        shift
+        ;;
+      -u)
+        RANGER_USER="$2"
+        shift
+        ;;
+      -p)
+        RANGER_PASSWORD="$2"
+        shift
+        ;;
+      -h)
+        HAWQ_URL="$2"
+        shift
+        ;;
+      -w)
+        HAWQ_USER="$2"
+        shift
+        ;;
+      -q)
+        HAWQ_PASSWORD="$2"
+        shift
+        ;;
+      *)
+        usage
+        ;;
+    esac
+    shift
+  done
+}
+
+function validate_params() {
+  get_ranger_url
+  get_ranger_user
+  get_ranger_password
+  get_hawq_url
+  get_hawq_user
+  get_hawq_password
+  echo "RANGER URL  = ${RANGER_URL}" 
+  echo "RANGER User = ${RANGER_USER}" 
+  echo "RANGER Password = $(mask ${RANGER_PASSWORD})" 
+  echo "HAWQ HOST = ${HAWQ_HOST}"
+  echo "HAWQ PORT = ${HAWQ_PORT}"  
+  echo "HAWQ User = ${HAWQ_USER}" 
+  echo "HAWQ Password = $(mask ${HAWQ_PASSWORD})" 
+}
+
+function check_hawq_service_definition() {
+  echo $(curl -sS -u ${RANGER_USER}:${RANGER_PASSWORD} http://${RANGER_URL}/service/public/v2/api/servicedef/name/hawq | grep hawq | wc -l)
+}
+
+function create_hawq_service_definition() {
+  if [ $(check_hawq_service_definition) == 0 ]; then
+    local json_file="$(dirname ${SCRIPT_DIR})/etc/ranger-servicedef-hawq.json"
+    if [ ! -f ${json_file} ]; then
+      fail "File ${json_file} not found."
+    fi
+    echo "HAWQ service definition was not found in Ranger Admin, creating it by uploading ${json_file}"
+    local output=$(curl -sS -u ${RANGER_USER}:${RANGER_PASSWORD} -H "Content-Type: application/json" -X POST http://${RANGER_URL}/service/plugins/definitions -d @${json_file})
+    local created=$(echo ${output} | grep created | wc -l)
+    if [ ${created} == 0 ] || [ $(check_hawq_service_definition) == 0 ]; then
+      fail "Creation of HAWQ service definition from ${json_file} in Ranger Admin at ${RANGER_URL} failed. ${output}"
+    fi
+  else
+    echo "HAWQ service definition already exists in Ranger Admin, nothing to do." 
+  fi
+}
+
+function check_hawq_service_instance() {
+  echo $(curl -sS -u ${RANGER_USER}:${RANGER_PASSWORD} http://${RANGER_URL}/service/public/v2/api/service/name/hawq | grep hawq | wc -l)
+}
+
+function create_hawq_service_instance() {
+  if [ $(check_hawq_service_instance) == 0 ]; then
+    local payload="{\"name\":\"hawq\",
+                    \"type\":\"hawq\",
+                    \"description\":\"HAWQ Master\",
+                    \"isEnabled\":true,
+                    \"configs\":{\"username\":\"${HAWQ_USER}\",
+                               \"password\":\"${HAWQ_PASSWORD}\",
+                               \"hostname\":\"${HAWQ_HOST}\",
+                               \"port\":\"${HAWQ_PORT}\"}}"
+
+    echo "HAWQ service instance was not found in Ranger Admin, creating it."
+    local output=$(curl -sS -u ${RANGER_USER}:${RANGER_PASSWORD} -H "Content-Type: application/json" -X POST http://${RANGER_URL}/service/public/v2/api/service -d "${payload}")
+    local created=$(echo ${output} | grep created | wc -l)
+    if [ ${created} == 0 ] || [ $(check_hawq_service_instance) == 0 ]; then
+      fail "Creation of HAWQ service instance in Ranger Admin at ${RANGER_URL} failed. ${output}"
+    fi
+  else
+    echo "HAWQ service instance already exists in Ranger Admin, nothing to do."
+  fi
+}
+
+main() {
+  if [[ $# -lt 1 ]]; then
+    usage
+  fi
+  SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+  parse_params "$@"
+  validate_params
+  create_hawq_service_definition
+  create_hawq_service_instance
+}
+main "$@"

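A minimal non-interactive run of the registration script above might look like the following; the hosts, ports, and credentials are placeholders, and any flag that is omitted is prompted for interactively:

    ./register_hawq.sh -r ranger-admin.example.com:6080 -u admin -p 'ranger-password' \
                       -h hawq-master.example.com:5432 -w gpadmin -q 'hawq-password'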
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/scripts/rps.sh
----------------------------------------------------------------------
diff --git a/ranger-plugin/scripts/rps.sh b/ranger-plugin/scripts/rps.sh
new file mode 100755
index 0000000..e8ccf3a
--- /dev/null
+++ b/ranger-plugin/scripts/rps.sh
@@ -0,0 +1,60 @@
+#!/usr/bin/env bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+if [ $# -le 0 ]; then
+  echo "Usage: rps (start|stop|init) [<catalina-args...>]"
+  exit 1
+fi
+
+actionCmd=$1
+shift
+
+CWDIR=$( cd $( dirname ${BASH_SOURCE[0]} ) && pwd )
+source $CWDIR/rps_env.sh
+
+setup_rps() {
+  echo "Initializing Hawq Ranger Plugin Service..."
+  cp $CATALINA_HOME/conf.template/* $CATALINA_BASE/conf
+  cp $CATALINA_BASE/conf/tomcat-server.xml $CATALINA_BASE/conf/server.xml
+  pushd $CATALINA_BASE/webapps >/dev/null
+  unzip -d rps rps.war >/dev/null
+  find . -name ranger-hawq-security.xml | xargs sed -i \
+    "s/localhost:6080/$RANGER_ADMIN_HOST:$RANGER_ADMIN_PORT/g"
+  popd >/dev/null
+  echo "Hawq Ranger Plugin Service installed on http://$RPS_HOST:$RPS_PORT/rps"
+  echo "Please use 'rps.sh start' to start the service"
+}
+
+case $actionCmd in
+  (init)
+    setup_rps
+    ;;
+  (start)
+    $CATALINA_HOME/bin/catalina.sh start "$@"
+    echo "Waiting for RPS service to start..."
+    sleep 15
+    ;;
+  (stop)
+    $CATALINA_HOME/bin/catalina.sh stop "$@"
+    echo "Waiting for RPS service to stop..."
+    sleep 10
+    ;;
+esac

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/scripts/rps_env.sh
----------------------------------------------------------------------
diff --git a/ranger-plugin/scripts/rps_env.sh b/ranger-plugin/scripts/rps_env.sh
new file mode 100755
index 0000000..ae36e8f
--- /dev/null
+++ b/ranger-plugin/scripts/rps_env.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+export CATALINA_HOME=/usr/lib/bigtop-tomcat
+export CATALINA_BASE=/usr/local/hawq/ranger/plugin-service
+
+export RANGER_ADMIN_HOST=${RANGER_ADMIN_HOST:-localhost}
+export RANGER_ADMIN_PORT=${RANGER_ADMIN_PORT:-6080}
+
+export RPS_HOST=${RPS_HOST:-localhost}
+export RPS_PORT=${RPS_PORT:-8432}
+export CATALINA_OPTS="-Dhttp.host=$RPS_HOST -Dhttp.port=$RPS_PORT"

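Putting the two scripts together, a sketch of bringing the plugin service up against a remote Ranger Admin (host names are illustrative; rps_env.sh supplies the defaults shown above) could be:

    export RANGER_ADMIN_HOST=ranger-admin.example.com
    export RANGER_ADMIN_PORT=6080
    export RPS_PORT=8432
    ./rps.sh init     # unpack rps.war and point ranger-hawq-security.xml at Ranger Admin
    ./rps.sh start    # delegates to catalina.sh start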
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/service/pom.xml
----------------------------------------------------------------------
diff --git a/ranger-plugin/service/pom.xml b/ranger-plugin/service/pom.xml
new file mode 100644
index 0000000..3f2f9f8
--- /dev/null
+++ b/ranger-plugin/service/pom.xml
@@ -0,0 +1,88 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>org.apache.hawq</groupId>
+    <artifactId>ranger-plugin-service</artifactId>
+    <packaging>war</packaging>
+    <name>HAWQ Ranger Service</name>
+    <description>HAWQ Ranger Service</description>
+    <parent>
+        <groupId>org.apache.hawq</groupId>
+        <artifactId>ranger-plugin</artifactId>
+        <version>2.1.0.0</version>
+        <relativePath>..</relativePath>
+    </parent>
+    <build>
+        <resources>
+            <resource>
+                <directory>src/main/resources</directory>
+                <filtering>true</filtering>
+            </resource>
+        </resources>
+        <plugins>
+            <plugin>
+                <!-- use mvn tomcat6:run-war to run the appserver with the app deployed -->
+                <groupId>org.apache.tomcat.maven</groupId>
+                <artifactId>tomcat6-maven-plugin</artifactId>
+                <version>2.2</version>
+                <configuration>
+                    <path>/rps</path>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.codehaus.jackson</groupId>
+            <artifactId>jackson-xc</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.ranger</groupId>
+            <artifactId>ranger-plugins-common</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>log4j</groupId>
+            <artifactId>log4j</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.mockito</groupId>
+            <artifactId>mockito-core</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.powermock</groupId>
+            <artifactId>powermock-module-junit4</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.powermock</groupId>
+            <artifactId>powermock-api-mockito</artifactId>
+        </dependency>
+    </dependencies>
+
+</project>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/HawqAuthorizer.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/HawqAuthorizer.java b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/HawqAuthorizer.java
new file mode 100644
index 0000000..625ce50
--- /dev/null
+++ b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/HawqAuthorizer.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.authorization;
+
+import org.apache.hawq.ranger.authorization.model.AuthorizationRequest;
+import org.apache.hawq.ranger.authorization.model.AuthorizationResponse;
+
+/**
+ * Interface for making authorization decisions.
+ */
+public interface HawqAuthorizer {
+
+    /**
+     * Determines whether access should be allowed for the given authorization request.
+     * @param request authorization request
+     * @return authorization response
+     */
+    AuthorizationResponse isAccessAllowed(AuthorizationRequest request);
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/RangerHawqAuthorizer.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/RangerHawqAuthorizer.java b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/RangerHawqAuthorizer.java
new file mode 100644
index 0000000..04d6f99
--- /dev/null
+++ b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/RangerHawqAuthorizer.java
@@ -0,0 +1,263 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.authorization;
+
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.collections.MapUtils;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hawq.ranger.authorization.model.AuthorizationRequest;
+import org.apache.hawq.ranger.authorization.model.AuthorizationResponse;
+import org.apache.hawq.ranger.authorization.model.HawqPrivilege;
+import org.apache.hawq.ranger.authorization.model.HawqResource;
+import org.apache.hawq.ranger.authorization.model.ResourceAccess;
+import org.apache.ranger.plugin.policyengine.RangerAccessRequest;
+import org.apache.ranger.plugin.policyengine.RangerAccessRequestImpl;
+import org.apache.ranger.plugin.policyengine.RangerAccessResource;
+import org.apache.ranger.plugin.policyengine.RangerAccessResourceImpl;
+import org.apache.ranger.plugin.policyengine.RangerAccessResult;
+import org.apache.ranger.plugin.service.RangerBasePlugin;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import static org.apache.hawq.ranger.authorization.Utils.HAWQ;
+
+/**
+ * Authorizer implementation that uses Ranger to make access decisions. Implemented as a singleton.
+ */
+public class RangerHawqAuthorizer implements HawqAuthorizer {
+
+    private static final Log LOG = LogFactory.getLog(RangerHawqAuthorizer.class);
+
+    private static final RangerHawqAuthorizer INSTANCE = new RangerHawqAuthorizer();
+
+    private RangerBasePlugin rangerPlugin;
+
+    /**
+     * Returns the instance of the RangerHawqAuthorizer.
+     * @return the singleton instance
+     */
+    public static RangerHawqAuthorizer getInstance() {
+        return INSTANCE;
+    }
+
+    /**
+     * Constructor. Initializes Ranger Base Plugin to fetch policies from Ranger.
+     */
+    private RangerHawqAuthorizer() {
+
+        LOG.info("Initializing RangerHawqAuthorizer");
+
+        String appId = Utils.getAppId();
+
+        LOG.info(String.format("Initializing RangerBasePlugin for service %s:%s", HAWQ, appId));
+        rangerPlugin = new RangerBasePlugin(HAWQ, appId);
+        rangerPlugin.init();
+        LOG.info(String.format("Initialized RangerBasePlugin for service %s:%s", HAWQ, appId));
+    }
+
+    @Override
+    public AuthorizationResponse isAccessAllowed(AuthorizationRequest request) {
+
+        // validate request to make sure no data is missing
+        validateRequest(request);
+
+        // prepare response object
+        AuthorizationResponse response = new AuthorizationResponse();
+        response.setRequestId(request.getRequestId());
+        Set<ResourceAccess> access = new HashSet<>();
+        response.setAccess(access);
+
+        // iterate over resource requests, augment processed ones with the decision and add to the response
+        for (ResourceAccess resourceAccess : request.getAccess()) {
+            boolean accessAllowed = authorizeResource(resourceAccess, request.getUser());
+            resourceAccess.setAllowed(accessAllowed);
+            access.add(resourceAccess);
+        }
+
+        return response;
+    }
+
+    /**
+     * Authorizes access to a single resource for a given user.
+     *
+     * @param resourceAccess resource to authorize access to
+     * @param user user requesting authorization
+     * @return true if access is authorized, false otherwise
+     */
+    private boolean authorizeResource(ResourceAccess resourceAccess, String user) {
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug(String.format("Request: access for user=%s to resource=%s with privileges=%s",
+                user, resourceAccess.getResource(), resourceAccess.getPrivileges()));
+        }
+
+        RangerAccessResourceImpl rangerResource = new RangerAccessResourceImpl();
+        //resource.setOwnerUser();
+        for (Map.Entry<HawqResource, String> resourceEntry : resourceAccess.getResource().entrySet()) {
+            rangerResource.setValue(resourceEntry.getKey().name(), resourceEntry.getValue());
+        }
+
+        boolean accessAllowed = true;
+        // iterate over all privileges requested
+        for (HawqPrivilege privilege : resourceAccess.getPrivileges()) {
+            // TODO not clear how we will get user groups -- Kerberos case ?
+            Set<String> userGroups = Collections.emptySet();
+            boolean privilegeAuthorized = authorizeResourcePrivilege(rangerResource, privilege.name(), user, userGroups);
+            // ALL model of evaluation -- all privileges must be authorized for access to be allowed
+            if (!privilegeAuthorized) {
+                accessAllowed = false;
+                break; // terminate early if even a single privilege is not authorized
+            }
+        }
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug(String.format("Decision: accessAllowed=%s for user=%s to resource=%s with privileges=%s",
+                    accessAllowed, user, resourceAccess.getResource(), resourceAccess.getPrivileges()));
+        }
+
+        return accessAllowed;
+    }
+
+    /**
+     * Authorizes access of a given type (privilege) to a single resource for a given user.
+     *
+     * @param rangerResource resource to authorize access to
+     * @param accessType privilege requested for a given resource
+     * @param user user requesting authorization
+     * @param userGroups groups a user belongs to
+     * @return true if access is authorized, false otherwise
+     */
+    private boolean authorizeResourcePrivilege(RangerAccessResource rangerResource, String accessType, String user, Set<String> userGroups) {
+
+        Map<String, String> resourceMap = rangerResource.getAsMap();
+        String database = resourceMap.get(HawqResource.database.name());
+        String schema = resourceMap.get(HawqResource.schema.name());
+        int resourceSize = resourceMap.size();
+
+        // special handling for non-leaf policies
+        if (accessType.equals(HawqPrivilege.create.name()) && database != null && schema == null && resourceSize == 1) {
+            accessType = HawqPrivilege.create_schema.toValue();
+            LOG.debug("accessType mapped to: create-schema");
+        } else if (accessType.equals(HawqPrivilege.usage.name()) && database != null && schema != null && resourceSize == 2) {
+            accessType = HawqPrivilege.usage_schema.toValue();
+            LOG.debug("accessType mapped to: usage-schema");
+        }
+
+        RangerAccessRequest rangerRequest = new RangerAccessRequestImpl(rangerResource, accessType, user, userGroups);
+        RangerAccessResult result = rangerPlugin.isAccessAllowed(rangerRequest);
+        boolean accessAllowed = result != null && result.getIsAllowed();
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug(String.format("--- RangerDecision: accessAllowed=%s for user=%s to resource=%s with privileges=%s, result present=%s",
+                    accessAllowed, user, rangerResource.getAsString(), accessType, result!=null));
+        }
+
+        return accessAllowed;
+    }
+
+    /**
+     * Validates that authorization requests do not have any missing data.
+     *
+     * @param request authorization request
+     * @throws IllegalArgumentException if any data is missing
+     */
+    private void validateRequest(AuthorizationRequest request) {
+        LOG.debug("Validating authorization request");
+
+        if (request == null) {
+            throw new IllegalArgumentException("request is null");
+        }
+
+        if (request.getRequestId() == null) {
+            throw new IllegalArgumentException("requestId field is missing or null in the request");
+        }
+
+        if (StringUtils.isEmpty(request.getUser())) {
+            throw new IllegalArgumentException("user field is missing or empty in the request");
+        }
+
+        if (StringUtils.isEmpty(request.getClientIp())) {
+            throw new IllegalArgumentException("clientIp field is missing or empty in the request");
+        }
+
+        if (StringUtils.isEmpty(request.getContext())) {
+            throw new IllegalArgumentException("context field is missing or empty in the request");
+        }
+
+        Set<ResourceAccess> accessSet = request.getAccess();
+        if (CollectionUtils.isEmpty(accessSet)) {
+            throw new IllegalArgumentException("access field is missing or empty in the request");
+        }
+
+        for (ResourceAccess access : accessSet) {
+            validateResourceAccess(access);
+        }
+
+        LOG.debug("Successfully validated authorization request");
+    }
+
+    /**
+     * Validates that resource access does not have any missing data.
+     *
+     * @param access resource access data
+     * @throws IllegalArgumentException if any data is missing
+     */
+    private void validateResourceAccess(ResourceAccess access) {
+        Map<HawqResource, String> resourceMap = access.getResource();
+        if  (MapUtils.isEmpty(resourceMap)) {
+            throw new IllegalArgumentException("resource field is missing or empty in the request");
+        }
+        for (Map.Entry<HawqResource, String> resourceEntry : resourceMap.entrySet()) {
+            if (StringUtils.isEmpty(resourceEntry.getValue())) {
+                throw new IllegalArgumentException(
+                        String.format("resource value is missing for key=%s in the request", resourceEntry.getKey())
+                );
+            }
+        }
+        if (CollectionUtils.isEmpty(access.getPrivileges())) {
+            throw new IllegalArgumentException("set of privileges is missing or empty in the request");
+        }
+    }
+
+    /**
+     * Sets an instance of the Ranger Plugin for testing.
+     *
+     * @param plugin plugin instance to use while testing
+     */
+    void setRangerPlugin(RangerBasePlugin plugin) {
+        rangerPlugin = plugin;
+    }
+
+    /**
+     * Returns the instance of the Ranger Plugin for testing.
+     *
+     * @return BaseRangerPlugin instance
+     */
+    RangerBasePlugin getRangerPlugin() {
+        return rangerPlugin;
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/RangerHawqPluginResource.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/RangerHawqPluginResource.java b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/RangerHawqPluginResource.java
new file mode 100644
index 0000000..26a7660
--- /dev/null
+++ b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/RangerHawqPluginResource.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.authorization;
+
+import com.sun.jersey.spi.resource.Singleton;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hawq.ranger.authorization.model.AuthorizationRequest;
+import org.apache.hawq.ranger.authorization.model.AuthorizationResponse;
+
+import javax.ws.rs.*;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import java.util.Date;
+
+/**
+ * JAX-RS resource for the authorization endpoint.
+ */
+@Path("/")
+@Singleton
+public class RangerHawqPluginResource {
+
+    private static final Log LOG = LogFactory.getLog(RangerHawqPluginResource.class);
+
+    private HawqAuthorizer authorizer;
+    private String version;
+
+    /**
+     * Constructor. Creates a new instance of the resource that uses <code>RangerHawqAuthorizer</code>.
+     */
+    public RangerHawqPluginResource() {
+        this.authorizer = RangerHawqAuthorizer.getInstance();
+    }
+
+
+    @Path("/version")
+    @GET
+    @Produces(MediaType.APPLICATION_JSON)
+    public Response version()
+    {
+        String output = "{\"version\":\"" + Utils.getVersion() + "\"}";
+        return Response.status(200).entity(output).build();
+    }
+
+    /**
+     * Authorizes a request to access protected resources with requested privileges.
+     * @param request authorization request
+     * @return authorization response
+     */
+    @POST
+    @Consumes(MediaType.APPLICATION_JSON)
+    @Produces(MediaType.APPLICATION_JSON)
+    public AuthorizationResponse authorize(AuthorizationRequest request)
+    {
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Received authorization request: " + request);
+        }
+
+        // exceptions are handled by ServiceExceptionMapper
+        AuthorizationResponse response = authorizer.isAccessAllowed(request);
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Returning authorization response: " + response);
+        }
+        return response;
+    }
+}
+
+

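Assuming the service is reachable at the URL printed by rps.sh (host and port taken from the rps_env.sh defaults; the exact path also depends on the Jersey servlet mapping in web.xml, which is not part of this diff), a quick liveness check against the version endpoint defined above might be:

    curl http://localhost:8432/rps/version
    # expected shape: {"version":"..."} with the value read from rps.properties, or "unknown"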
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/ServiceExceptionMapper.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/ServiceExceptionMapper.java b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/ServiceExceptionMapper.java
new file mode 100644
index 0000000..b983d72
--- /dev/null
+++ b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/ServiceExceptionMapper.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.authorization;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.ext.ExceptionMapper;
+import javax.ws.rs.ext.Provider;
+
+/**
+ * Maps service exceptions to HTTP response.
+ */
+@Provider
+public class ServiceExceptionMapper implements ExceptionMapper<Throwable> {
+
+    private static final Log LOG = LogFactory.getLog(ServiceExceptionMapper.class);
+
+    @Override
+    public Response toResponse(Throwable e) {
+
+        LOG.error("Service threw an exception: ", e);
+
+        // default to internal server error (HTTP 500)
+        Response.Status status = Response.Status.INTERNAL_SERVER_ERROR;
+
+        if (e instanceof IllegalArgumentException) {
+            status = Response.Status.BAD_REQUEST;
+        }
+
+        ErrorPayload error = new ErrorPayload(status.getStatusCode(), e.getMessage());
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug(String.format("Returning error response: status=%s message=%s",
+                                    error.getStatus(), error.getMessage()));
+        }
+
+        return Response.status(status)
+                .entity(error)
+                .type(MediaType.APPLICATION_JSON)
+                .build();
+    }
+
+    /**
+     * Represents payload to be serialized as JSON into response.
+     */
+    public static class ErrorPayload {
+        private int status;
+        private String message;
+
+        /**
+         * Constructor.
+         * @param status HTTP error status
+         * @param message error message
+         */
+        public ErrorPayload(int status, String message) {
+            this.status = status;
+            this.message = message;
+        }
+
+        /**
+         * Returns status code
+         * @return status code
+         */
+        public int getStatus() {
+            return status;
+        }
+
+        /**
+         * Returns error message
+         * @return error message
+         */
+        public String getMessage() {
+            return message;
+        }
+
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/Utils.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/Utils.java b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/Utils.java
new file mode 100644
index 0000000..86f7fc4
--- /dev/null
+++ b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/Utils.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.authorization;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Properties;
+
+/**
+ * Utility class for reading values from the property file.
+ */
+public abstract class Utils {
+
+    public static final String HAWQ = "hawq";
+    public static final String UNKNOWN = "unknown";
+    public static final String APP_ID_PROPERTY = "ranger.hawq.instance";
+    public static final String VERSION_PROPERTY = "version";
+    public static final String RANGER_SERVICE_PROPERTY_FILE = "rps.properties";
+
+    private static final Log LOG = LogFactory.getLog(Utils.class);
+    private static final Properties properties = readPropertiesFromFile();
+
+    /**
+     * Retrieves the app id from the JVM system property ranger.hawq.instance,
+     * falling back to the same key in the rps.properties file.
+     *
+     * If neither is set, hawq is used as the default.
+     *
+     * @return String id of the app
+     */
+    public static String getAppId() {
+        return System.getProperty(APP_ID_PROPERTY, properties.getProperty(APP_ID_PROPERTY, HAWQ));
+    }
+
+    /**
+     * Retrieves the version read from the property file.
+     *
+     * If the property is not set, unknown is used as the default.
+     *
+     * @return version of the service
+     */
+    public static String getVersion() {
+        return properties.getProperty(VERSION_PROPERTY, UNKNOWN);
+    }
+
+    /**
+     * Reads properties from the property file.
+     * @return properties read from the file
+     */
+    private static Properties readPropertiesFromFile() {
+        Properties prop = new Properties();
+        InputStream inputStream = Utils.class.getClassLoader().getResourceAsStream(RANGER_SERVICE_PROPERTY_FILE);
+        if (inputStream == null) {
+            // Properties.load(null) would throw NullPointerException; log and fall back to defaults instead
+            LOG.error("Property file not found on classpath: " + RANGER_SERVICE_PROPERTY_FILE);
+            return prop;
+        }
+        try {
+            prop.load(inputStream);
+        } catch (IOException e) {
+            LOG.error("Failed to read from: " + RANGER_SERVICE_PROPERTY_FILE);
+        }
+        return prop;
+    }
+}

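Since getAppId() consults the JVM system property before rps.properties, one way to point the service at a differently named Ranger service instance (a sketch; the property value hawq_cluster1 is illustrative) is to extend CATALINA_OPTS before starting Tomcat:

    export CATALINA_OPTS="$CATALINA_OPTS -Dranger.hawq.instance=hawq_cluster1"
    ./rps.sh start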
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/model/AuthorizationRequest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/model/AuthorizationRequest.java b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/model/AuthorizationRequest.java
new file mode 100644
index 0000000..6e8ab4f
--- /dev/null
+++ b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/model/AuthorizationRequest.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.authorization.model;
+
+import java.util.Set;
+
+/**
+ * Model for submitting an authorization request for multiple resources.
+ */
+public class AuthorizationRequest {
+
+    private Integer requestId;
+    private String user;
+    private String clientIp;
+    private String context;
+    private Set<ResourceAccess> access;
+
+    /**
+     * Returns request id.
+     * @return id of the request
+     */
+    public Integer getRequestId() {
+        return requestId;
+    }
+
+    /**
+     * Sets request id.
+     * @param requestId id of the request
+     */
+    public void setRequestId(Integer requestId) {
+        this.requestId = requestId;
+    }
+
+    /**
+     * Returns user name for the user submitting the access request.
+     * @return name of the user
+     */
+    public String getUser() {
+        return user;
+    }
+
+    /**
+     * Sets user name for the user submitting the access request.
+     * @param user name of the user
+     */
+    public void setUser(String user) {
+        this.user = user;
+    }
+
+    /**
+     * Returns IP address of the client where the user request is made from.
+     * @return IP address of the user's client
+     */
+    public String getClientIp() {
+        return clientIp;
+    }
+
+    /**
+     * Sets IP address of the client where the user request is made from.
+     * @param clientIp IP address of the user's client
+     */
+    public void setClientIp(String clientIp) {
+        this.clientIp = clientIp;
+    }
+
+    /**
+     * Returns the context of the request, usually the SQL query that the user ran.
+     * @return context of the request
+     */
+    public String getContext() {
+        return context;
+    }
+
+    /**
+     * Sets the context of the request, usually the SQL query that the user ran.
+     * @param context context of the request
+     */
+    public void setContext(String context) {
+        this.context = context;
+    }
+
+    /**
+     * Returns a set of <code>ResourceAccess</code> objects.
+     * @return a set of <code>ResourceAccess</code> objects
+     */
+    public Set<ResourceAccess> getAccess() {
+        return access;
+    }
+
+    /**
+     * Sets <code>ResourceAccess</code> objects
+     * @param access a set of <code>ResourceAccess</code> objects
+     */
+    public void setAccess(Set<ResourceAccess> access) {
+        this.access = access;
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/model/AuthorizationResponse.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/model/AuthorizationResponse.java b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/model/AuthorizationResponse.java
new file mode 100644
index 0000000..f989b8d
--- /dev/null
+++ b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/model/AuthorizationResponse.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.authorization.model;
+
+import java.util.Set;
+
+/**
+ *  Model for response containing authorization decisions for access to multiple resources.
+ */
+public class AuthorizationResponse {
+
+    private Integer requestId;
+    private Set<ResourceAccess> access;
+
+    /**
+     * Returns request id.
+     * @return id of the request
+     */
+    public Integer getRequestId() {
+        return requestId;
+    }
+
+    /**
+     * Sets request id.
+     * @param requestId id of the request
+     */
+    public void setRequestId(Integer requestId) {
+        this.requestId = requestId;
+    }
+
+    /**
+     * Returns a set of <code>ResourceAccess</code> objects.
+     * @return a set of <code>ResourceAccess</code> objects
+     */
+    public Set<ResourceAccess> getAccess() {
+        return access;
+    }
+
+    /**
+     * Sets <code>ResourceAccess</code> objects
+     * @param access a set of <code>ResourceAccess</code> objects
+     */
+    public void setAccess(Set<ResourceAccess> access) {
+        this.access = access;
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/model/HawqPrivilege.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/model/HawqPrivilege.java b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/model/HawqPrivilege.java
new file mode 100644
index 0000000..ffce67a
--- /dev/null
+++ b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/model/HawqPrivilege.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.authorization.model;
+
+import org.codehaus.jackson.annotate.JsonCreator;
+import org.codehaus.jackson.annotate.JsonValue;
+
+/**
+ *  Model enumeration of types of HAWQ privileges.
+ */
+public enum HawqPrivilege {
+    select,
+    insert,
+    update,
+    delete,
+    references,
+    usage,
+    create,
+    connect,
+    execute,
+    temp,
+    create_schema,
+    usage_schema,
+    all;
+
+    /**
+     * Returns HawqPrivilege type by case-insensitive lookup of the value.
+     * @param key case insensitive string representation of the privilege
+     * @return instance of HawqPrivilege
+     */
+    @JsonCreator
+    public static HawqPrivilege fromString(String key) {
+        return key == null ? null : HawqPrivilege.valueOf(key.replace('-', '_').toLowerCase());
+    }
+
+    /**
+     * Returns string representation of the enum, replaces underscores with dashes.
+     * @return string representation of the enum
+     */
+    @JsonValue
+    public String toValue() {
+        return name().replace('_', '-');
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/model/HawqResource.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/model/HawqResource.java b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/model/HawqResource.java
new file mode 100644
index 0000000..c9de4e7
--- /dev/null
+++ b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/model/HawqResource.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.authorization.model;
+
+import org.codehaus.jackson.annotate.JsonCreator;
+
+/**
+ * Model enumeration of types of HAWQ resources.
+ */
+public enum HawqResource {
+    database,
+    schema,
+    table,
+    sequence,
+    function,
+    language,
+    tablespace,
+    protocol;
+
+    /**
+     * Returns HawqResource type by case-insensitive lookup of the value.
+     * @param key case insensitive string representation of the resource
+     * @return instance of HawqResource
+     */
+    @JsonCreator
+    public static HawqResource fromString(String key) {
+        return key == null ? null : HawqResource.valueOf(key.toLowerCase());
+    }
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/model/ResourceAccess.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/model/ResourceAccess.java b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/model/ResourceAccess.java
new file mode 100644
index 0000000..df1631a
--- /dev/null
+++ b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/model/ResourceAccess.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.authorization.model;
+
+import org.apache.commons.lang.builder.ToStringBuilder;
+
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+
+/**
+ * Model object for requesting access to a single resource.
+ */
+public class ResourceAccess {
+
+    private Map<HawqResource, String> resource;
+    private Set<HawqPrivilege> privileges;
+    private boolean allowed = false;
+
+    public Set<HawqPrivilege> getPrivileges() {
+        return privileges;
+    }
+
+    public void setPrivileges(Set<HawqPrivilege> privileges) {
+        this.privileges = privileges;
+    }
+
+    public boolean isAllowed() {
+        return allowed;
+    }
+
+    public void setAllowed(boolean allowed) {
+        this.allowed = allowed;
+    }
+
+    public Map<HawqResource, String> getResource() {
+        return resource;
+    }
+
+    public void setResource(Map<HawqResource, String> resource) {
+        this.resource = resource;
+    }
+
+    @Override
+    public String toString() {
+        return new ToStringBuilder(this)
+                .append("resource", resource)
+                .append("privileges", privileges)
+                .append("allowed", allowed)
+                .toString();
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        ResourceAccess that = (ResourceAccess) o;
+        return allowed == that.allowed &&
+                Objects.equals(resource, that.resource) &&
+                Objects.equals(privileges, that.privileges);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(resource, privileges, allowed);
+    }
+
+}

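Tying the model classes together, an illustrative authorization call could look like the sketch below; the endpoint URL, request id, and resource names are made up for the example, while the field names follow AuthorizationRequest, ResourceAccess, HawqResource and HawqPrivilege above (dashed privilege forms such as create-schema map to the underscore enum constants via fromString):

    curl -X POST -H "Content-Type: application/json" http://localhost:8432/rps/ -d '{
      "requestId": 1,
      "user": "gpadmin",
      "clientIp": "127.0.0.1",
      "context": "SELECT * FROM sales",
      "access": [
        { "resource": { "database": "postgres", "schema": "public", "table": "sales" },
          "privileges": [ "select" ] }
      ]
    }'
    # each element of "access" comes back with an added "allowed": true/false field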
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/service/src/main/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/ranger-plugin/service/src/main/resources/log4j.properties b/ranger-plugin/service/src/main/resources/log4j.properties
new file mode 100644
index 0000000..6bbdaed
--- /dev/null
+++ b/ranger-plugin/service/src/main/resources/log4j.properties
@@ -0,0 +1,42 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# see debug messages during unit tests
+project.root.logger=DEBUG,console
+
+# suppress all logging output during unit tests
+#project.root.logger=FATAL,devnull
+
+#
+# Loggers
+#
+log4j.rootLogger=${project.root.logger}
+
+# suppress most errors from Apache Ranger and Hadoop during unit tests
+log4j.logger.org.apache.ranger=FATAL
+log4j.logger.org.apache.hadoop=FATAL
+
+#
+# Appenders
+#
+
+# nothing
+log4j.appender.devnull=org.apache.log4j.varia.NullAppender
+
+# console
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/service/src/main/resources/ranger-hawq-security.xml
----------------------------------------------------------------------
diff --git a/ranger-plugin/service/src/main/resources/ranger-hawq-security.xml b/ranger-plugin/service/src/main/resources/ranger-hawq-security.xml
new file mode 100644
index 0000000..46dd75d
--- /dev/null
+++ b/ranger-plugin/service/src/main/resources/ranger-hawq-security.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+    <property>
+        <name>ranger.plugin.hawq.service.name</name>
+        <value>hawq</value>
+        <description>
+            Name of the Ranger service containing policies for this HAWQ instance
+        </description>
+    </property>
+
+    <property>
+        <name>ranger.plugin.hawq.policy.source.impl</name>
+        <value>org.apache.ranger.admin.client.RangerAdminRESTClient</value>
+        <description>
+            Class to retrieve policies from the source
+        </description>
+    </property>
+
+    <property>
+        <name>ranger.plugin.hawq.policy.rest.url</name>
+        <value>http://localhost:6080</value>
+        <description>
+            URL to Ranger Admin
+        </description>
+    </property>
+
+    <property>
+        <name>ranger.plugin.hawq.policy.rest.ssl.config.file</name>
+        <value>/usr/local/hawq/ranger/etc/ranger-policymgr-ssl.xml</value>
+        <description>
+            Path to the file containing SSL details to contact Ranger Admin
+        </description>
+    </property>
+
+    <property>
+        <name>ranger.plugin.hawq.policy.pollIntervalMs</name>
+        <value>30000</value>
+        <description>
+            How often to poll for changes in policies?
+        </description>
+    </property>
+
+    <property>
+        <name>ranger.plugin.hawq.policy.cache.dir</name>
+        <value>/usr/local/hawq/ranger/policycache</value>
+        <description>
+            Directory where Ranger policies are cached after successful retrieval from the source
+        </description>
+    </property>
+
+    <!--
+    <property>
+        <name>xasecure.hive.update.xapolicies.on.grant.revoke</name>
+        <value>true</value>
+        <description>Should Hive plugin update Ranger policies for updates to permissions done using GRANT/REVOKE?</description>
+    </property>
+    -->
+    <property>
+        <name>ranger.plugin.hawq.policy.rest.client.connection.timeoutMs</name>
+        <value>120000</value>
+        <description>
+            RangerRESTClient Connection Timeout in Milliseconds
+        </description>
+    </property>
+
+    <property>
+        <name>ranger.plugin.hawq.policy.rest.client.read.timeoutMs</name>
+        <value>30000</value>
+        <description>
+            RangerRESTClient read Timeout in Milliseconds
+        </description>
+    </property>
+</configuration>

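These properties follow the standard ranger.plugin.<service>.* contract consumed by the Apache Ranger client library. A rough sketch, assuming the authorizer bootstraps the stock RangerBasePlugin (the actual class added by this commit may differ):

    import org.apache.ranger.plugin.service.RangerBasePlugin;

    public class RangerHawqPluginBootstrap {
        // Constructing the plugin with service type "hawq" makes it read
        // ranger-hawq-security.xml from the classpath, poll Ranger Admin at
        // policy.rest.url every policy.pollIntervalMs, and cache policies
        // under policy.cache.dir.
        public static RangerBasePlugin start(String appId) {
            RangerBasePlugin plugin = new RangerBasePlugin("hawq", appId);
            plugin.init();
            return plugin;
        }
    }
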
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/service/src/main/resources/rps.properties
----------------------------------------------------------------------
diff --git a/ranger-plugin/service/src/main/resources/rps.properties b/ranger-plugin/service/src/main/resources/rps.properties
new file mode 100644
index 0000000..9e2b1f4
--- /dev/null
+++ b/ranger-plugin/service/src/main/resources/rps.properties
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ranger.hawq.instance=hawq
+version=${project.version}
\ No newline at end of file

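A small illustration of reading rps.properties from the classpath; the Utils class used elsewhere in this module may obtain these values differently, so treat this as an assumption-laden sketch:

    import java.io.IOException;
    import java.io.InputStream;
    import java.util.Properties;

    public class RpsProperties {
        // Loads ranger.hawq.instance and version from rps.properties on the classpath.
        public static Properties load() throws IOException {
            Properties props = new Properties();
            InputStream in = RpsProperties.class.getClassLoader().getResourceAsStream("rps.properties");
            if (in == null) {
                throw new IOException("rps.properties not found on classpath");
            }
            try {
                props.load(in);
            } finally {
                in.close();
            }
            return props;
        }
    }
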
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/service/src/main/webapp/WEB-INF/web.xml
----------------------------------------------------------------------
diff --git a/ranger-plugin/service/src/main/webapp/WEB-INF/web.xml b/ranger-plugin/service/src/main/webapp/WEB-INF/web.xml
new file mode 100644
index 0000000..36c976f
--- /dev/null
+++ b/ranger-plugin/service/src/main/webapp/WEB-INF/web.xml
@@ -0,0 +1,72 @@
+<?xml version="1.0"?>
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+<web-app version="3.0"
+         xmlns="http://java.sun.com/xml/ns/javaee"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-app_3_0.xsd">
+
+    <!--
+        This is the HAWQ Ranger Plugin Service webapp xml descriptor
+
+        servlet-class	Jersey entrypoint class
+        init-param		com.sun.jersey.config.property.packages
+                            Tells Jersey where the REST components of this webapp are located
+                        jersey.config.server.provider.scanning.recursive
+                            Tells Jersey to recursively scan the package for REST resources
+        load-on-startup	Initialize the webapp on app server startup
+        servlet-mapping	Maps the path of the servlet (ranger-plugin/*)
+        listener		A class invoked after the webapp is initialized and before it shuts down
+    -->
+
+    <servlet>
+        <servlet-name>HAWQ_Ranger_Plugin_Service</servlet-name>
+        <servlet-class>com.sun.jersey.spi.container.servlet.ServletContainer</servlet-class>
+        <init-param>
+            <param-name>com.sun.jersey.config.property.packages</param-name>
+            <param-value>org.apache.hawq.ranger.authorization</param-value>
+        </init-param>
+        <init-param>
+            <param-name>com.sun.jersey.api.json.POJOMappingFeature</param-name>
+            <param-value>true</param-value>
+        </init-param>
+        <load-on-startup>1</load-on-startup>
+    </servlet>
+    <servlet-mapping>
+        <servlet-name>HAWQ_Ranger_Plugin_Service</servlet-name>
+        <url-pattern>/*</url-pattern>
+    </servlet-mapping>
+
+    <!--
+    <listener>
+        <listener-class>org.apache.hawq.pxf.service.rest.ServletLifecycleListener</listener-class>
+    </listener>
+    -->
+    <!-- log4j configuration
+         Log4jConfigListener looks for a file under log4jConfigLocation.
+         When not using absolute path, the path starts from the webapp root directory.
+         If this file cannot be read, log4j will revert to using the default
+         pxf-log4j.properties inside the webapp. -->
+    <context-param>
+        <param-name>log4jConfigLocation</param-name>
+        <param-value>/etc/pxf/conf/pxf-log4j.properties</param-value>
+    </context-param>
+</web-app>

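Because com.sun.jersey.config.property.packages points at org.apache.hawq.ranger.authorization, any JAX-RS annotated class in that package is published under the /* servlet mapping. A hypothetical resource (name and path are illustrative, not the endpoint actually shipped by this commit):

    package org.apache.hawq.ranger.authorization;

    import javax.ws.rs.GET;
    import javax.ws.rs.Path;
    import javax.ws.rs.Produces;
    import javax.ws.rs.core.MediaType;

    // With the web.xml above, this would be reachable at
    // http://<host>:<port>/<context>/status once Jersey scans the package.
    @Path("/status")
    public class StatusResource {
        @GET
        @Produces(MediaType.APPLICATION_JSON)
        public String getStatus() {
            return "{\"status\":\"ok\"}";
        }
    }
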
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/RangerHawqAuthorizerAppIdTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/RangerHawqAuthorizerAppIdTest.java b/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/RangerHawqAuthorizerAppIdTest.java
new file mode 100644
index 0000000..461864e
--- /dev/null
+++ b/ranger-plugin/service/src/test/java/org/apache/hawq/ranger/authorization/RangerHawqAuthorizerAppIdTest.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hawq.ranger.authorization;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.powermock.api.mockito.PowerMockito;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.when;
+
+@RunWith(PowerMockRunner.class)
+@PrepareForTest(Utils.class)
+public class RangerHawqAuthorizerAppIdTest {
+
+    @Test
+    public void testAppIdIsSet() {
+        PowerMockito.mockStatic(Utils.class);
+        when(Utils.getAppId()).thenReturn("foo");
+        assertEquals("foo", RangerHawqAuthorizer.getInstance().getRangerPlugin().getAppId());
+    }
+}


[34/50] [abbrv] incubator-hawq git commit: HAWQ-1279. Force to recompute namespace_path when enable_ranger

Posted by es...@apache.org.
HAWQ-1279. Force to recompute namespace_path when enable_ranger


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/21f1e29a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/21f1e29a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/21f1e29a

Branch: refs/heads/2.1.0.0-incubating
Commit: 21f1e29a43aee003e250e41375bf226fabbe2ce3
Parents: 8261c13
Author: interma <in...@outlook.com>
Authored: Wed Jan 18 13:13:52 2017 +0800
Committer: hubertzhang <hu...@apache.org>
Committed: Thu Jan 19 10:56:01 2017 +0800

----------------------------------------------------------------------
 src/backend/catalog/namespace.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/21f1e29a/src/backend/catalog/namespace.c
----------------------------------------------------------------------
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index 4685bfa..7049d32 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -1933,7 +1933,12 @@ recomputeNamespacePath(void)
 	 * Do nothing if path is already valid.
 	 */
 	if (namespaceSearchPathValid && namespaceUser == roleid)
-		return;
+	{
+		if (!enable_ranger)
+			return;
+		else
+			elog(DEBUG3, "recompute search_path[%s] when enable_ranger", namespace_search_path);
+	}
 
 	/* Need a modifiable copy of namespace_search_path string */
 	rawname = pstrdup(namespace_search_path);


[16/50] [abbrv] incubator-hawq git commit: HAWQ-1268. Update pom.xml to reflect the correct version for apache hawq 2.1.0.0-incubating

Posted by es...@apache.org.
HAWQ-1268. Update pom.xml to reflect the correct version for apache hawq 2.1.0.0-incubating


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/c8be9f29
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/c8be9f29
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/c8be9f29

Branch: refs/heads/2.1.0.0-incubating
Commit: c8be9f29e2bd35508ad529a64fb1884daec3155e
Parents: a817715
Author: Ruilong Huo <rh...@pivotal.io>
Authored: Fri Jan 13 10:04:13 2017 +0800
Committer: Ruilong Huo <rh...@pivotal.io>
Committed: Fri Jan 13 10:37:04 2017 +0800

----------------------------------------------------------------------
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/c8be9f29/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index f128681..bdd2e75 100644
--- a/pom.xml
+++ b/pom.xml
@@ -22,7 +22,7 @@
  
   <groupId>org.apache.hawq</groupId>
   <artifactId>hawq</artifactId>
-  <version>2.0</version>
+  <version>2.1</version>
   <packaging>pom</packaging>
 
   <build>  


[08/50] [abbrv] incubator-hawq git commit: HAWQ-1242. hawq-site.xml default content has wrong guc variable names

Posted by es...@apache.org.
HAWQ-1242. hawq-site.xml default content has wrong guc variable names


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/8d22582c
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/8d22582c
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/8d22582c

Branch: refs/heads/2.1.0.0-incubating
Commit: 8d22582c76309184cdb961abcd2b66f65e6bebb1
Parents: 60f0933
Author: Yi <yj...@pivotal.io>
Authored: Wed Jan 4 14:10:09 2017 +1100
Committer: Yi <yj...@pivotal.io>
Committed: Wed Jan 4 14:10:09 2017 +1100

----------------------------------------------------------------------
 src/backend/utils/misc/guc.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/8d22582c/src/backend/utils/misc/guc.c
----------------------------------------------------------------------
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 00b9bad..dccd599 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -8195,7 +8195,7 @@ static struct config_string ConfigureNamesString[] =
   },
 
 	{
-		{"standby_address_host", PGC_POSTMASTER, PRESET_OPTIONS,
+		{"hawq_standby_address_host", PGC_POSTMASTER, PRESET_OPTIONS,
 			gettext_noop("standby server address hostname"),
 			NULL
 		},
@@ -8204,7 +8204,7 @@ static struct config_string ConfigureNamesString[] =
 	},
 
 	{
-		{"dfs_url", PGC_POSTMASTER, PRESET_OPTIONS,
+		{"hawq_dfs_url", PGC_POSTMASTER, PRESET_OPTIONS,
 			gettext_noop("hdfs url"),
 			NULL
 		},
@@ -8213,7 +8213,7 @@ static struct config_string ConfigureNamesString[] =
 	},
 
 	{
-		{"master_directory", PGC_POSTMASTER, PRESET_OPTIONS,
+		{"hawq_master_directory", PGC_POSTMASTER, PRESET_OPTIONS,
 			gettext_noop("master server data directory"),
 			NULL
 		},
@@ -8222,7 +8222,7 @@ static struct config_string ConfigureNamesString[] =
 	},
 
 	{
-		{"segment_directory", PGC_POSTMASTER, PRESET_OPTIONS,
+		{"hawq_segment_directory", PGC_POSTMASTER, PRESET_OPTIONS,
 			gettext_noop("segment data directory"),
 			NULL
 		},


[38/50] [abbrv] incubator-hawq git commit: HAWQ-1286. Reduce unnecessary calls of namespace check when run \d

Posted by es...@apache.org.
HAWQ-1286. Reduce unnecessary calls of namespace check when run \d


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/5da0476a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/5da0476a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/5da0476a

Branch: refs/heads/2.1.0.0-incubating
Commit: 5da0476a168a754a1c3d55096d6bbb31ecdd78ab
Parents: 517e6d2
Author: interma <in...@outlook.com>
Authored: Fri Jan 20 14:46:16 2017 +0800
Committer: interma <in...@outlook.com>
Committed: Fri Jan 20 14:46:16 2017 +0800

----------------------------------------------------------------------
 src/backend/catalog/namespace.c | 23 +++++++++++++++++++++++
 src/backend/tcop/postgres.c     |  4 ++++
 src/include/catalog/namespace.h |  6 +++++-
 3 files changed, 32 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/5da0476a/src/backend/catalog/namespace.c
----------------------------------------------------------------------
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index 7049d32..a780625 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -48,9 +48,11 @@
 #include "utils/memutils.h"
 #include "utils/syscache.h"
 #include "utils/guc.h"
+#include "utils/hsearch.h"
 #include "cdb/cdbvars.h"
 #include "tcop/utility.h"
 
+
 /*
  * The namespace search path is a possibly-empty list of namespace OIDs.
  * In addition to the explicit list, several implicitly-searched namespaces
@@ -113,6 +115,8 @@
  * namespaceUser is the userid the path has been computed for.
  */
 
+extern const char *debug_query_string;
+
 static List *namespaceSearchPath = NIL;
 
 static Oid	namespaceUser = InvalidOid;
@@ -129,6 +133,9 @@ static bool tempCreationPending = false;
 /* The above five values are valid only if namespaceSearchPathValid */
 static bool namespaceSearchPathValid = true;
 
+/* store the query sign from the last call of recomputeNamespacePath(); use it to decide whether the cached search path can be reused */
+static uint32 last_query_sign = 0;
+
 /*
  * myTempNamespace is InvalidOid until and unless a TEMP namespace is set up
  * in a particular backend session (this happens when a CREATE TEMP TABLE
@@ -178,6 +185,11 @@ Datum		pg_my_temp_schema(PG_FUNCTION_ARGS);
 Datum		pg_is_other_temp_schema(PG_FUNCTION_ARGS);
 Datum       pg_objname_to_oid(PG_FUNCTION_ARGS);
 
+void
+reset_query_sign()
+{
+	last_query_sign = 0;
+}
 
 /*
  * GetCatalogId
@@ -1935,9 +1947,20 @@ recomputeNamespacePath(void)
 	if (namespaceSearchPathValid && namespaceUser == roleid)
 	{
 		if (!enable_ranger)
+		{
 			return;
+		}
 		else
+		{
+			uint32 current_query_sign = 0;
+			if (debug_query_string != NULL)
+				current_query_sign = string_hash(debug_query_string, strlen(debug_query_string));
+
+			if (current_query_sign == last_query_sign)
+				return;
+			last_query_sign = current_query_sign;
 			elog(DEBUG3, "recompute search_path[%s] when enable_ranger", namespace_search_path);
+		}
 	}
 
 	/* Need a modifiable copy of namespace_search_path string */

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/5da0476a/src/backend/tcop/postgres.c
----------------------------------------------------------------------
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index fc71eda..74c5dd6 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -115,6 +115,7 @@
 #include "cdb/cdbinmemheapam.h"
 
 #include "utils/rangerrest.h"
+#include "catalog/namespace.h"
 
 #include "resourcemanager/dynrm.h"
 #include "resourcemanager/envswitch.h"
@@ -591,6 +592,9 @@ ReadCommand(StringInfo inBuf)
 		result = SocketBackend(inBuf);
 	else
 		result = InteractiveBackend(inBuf);
+
+	/* reset last_query_sign to 0 when a new SQL statement is read */
+	reset_query_sign();
 	return result;
 }
 

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/5da0476a/src/include/catalog/namespace.h
----------------------------------------------------------------------
diff --git a/src/include/catalog/namespace.h b/src/include/catalog/namespace.h
index 34b3f1a..b8fce41 100644
--- a/src/include/catalog/namespace.h
+++ b/src/include/catalog/namespace.h
@@ -90,9 +90,13 @@ extern void AtEOXact_Namespace(bool isCommit);
 extern void AtEOSubXact_Namespace(bool isCommit, SubTransactionId mySubid,
 					  SubTransactionId parentSubid);
 
+extern List *fetch_search_path(bool includeImplicit);
+
+extern void reset_query_sign();
+
 /* stuff for search_path GUC variable */
 extern char *namespace_search_path;
 
-extern List *fetch_search_path(bool includeImplicit);
+
 
 #endif   /* NAMESPACE_H */


[03/50] [abbrv] incubator-hawq git commit: HAWQ-1239. Fail to call pg_rangercheck_batch() when 'rte->rtekind != RTE_RELATION' or 'requiredPerms == 0'

Posted by es...@apache.org.
HAWQ-1239. Fail to call pg_rangercheck_batch() when 'rte->rtekind != RTE_RELATION' or 'requiredPerms == 0'


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/cee573ad
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/cee573ad
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/cee573ad

Branch: refs/heads/2.1.0.0-incubating
Commit: cee573ad418a6e27a1cb13c66958a661af822e74
Parents: 4ca1587
Author: Chunling Wang <wa...@126.com>
Authored: Tue Dec 27 17:52:56 2016 +0800
Committer: hzhang2 <zh...@163.com>
Committed: Wed Dec 28 17:22:20 2016 +0800

----------------------------------------------------------------------
 src/backend/parser/parse_relation.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/cee573ad/src/backend/parser/parse_relation.c
----------------------------------------------------------------------
diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c
index 6839207..7dbe496 100644
--- a/src/backend/parser/parse_relation.c
+++ b/src/backend/parser/parse_relation.c
@@ -2745,10 +2745,10 @@ ExecCheckRTPermsWithRanger(List *rangeTable)
     RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
 
     if (rte->rtekind != RTE_RELATION)
-      return;
+      continue;
     requiredPerms = rte->requiredPerms;
     if (requiredPerms == 0)
-      return;
+      continue;
     
     relOid = rte->relid;
     userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
@@ -2763,6 +2763,9 @@ ExecCheckRTPermsWithRanger(List *rangeTable)
 
   } // foreach
 
+  if (ranger_check_args == NIL)
+    return;
+
   // ranger ACL check with package Oids
   List *aclresults = NIL;
   aclresults = pg_rangercheck_batch(ranger_check_args);


[17/50] [abbrv] incubator-hawq git commit: HAWQ-1248. Merge Dockerfiles for HAWQ Dev into HAWQ code base.

Posted by es...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-site.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-site.xml b/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-site.xml
new file mode 100644
index 0000000..a810ca4
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-site.xml
@@ -0,0 +1,173 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+
+  <!-- KMS Backend KeyProvider -->
+
+  <property>
+    <name>hadoop.kms.key.provider.uri</name>
+    <value>jceks://file@/${user.home}/kms.keystore</value>
+    <description>
+      URI of the backing KeyProvider for the KMS.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.security.keystore.JavaKeyStoreProvider.password</name>
+    <value>none</value>
+    <description>
+      If using the JavaKeyStoreProvider, the password for the keystore file.
+    </description>
+  </property>
+
+  <!-- KMS Cache -->
+
+  <property>
+    <name>hadoop.kms.cache.enable</name>
+    <value>true</value>
+    <description>
+      Whether the KMS will act as a cache for the backing KeyProvider.
+      When the cache is enabled, operations like getKeyVersion, getMetadata,
+      and getCurrentKey will sometimes return cached data without consulting
+      the backing KeyProvider. Cached values are flushed when keys are deleted
+      or modified.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.cache.timeout.ms</name>
+    <value>600000</value>
+    <description>
+      Expiry time for the KMS key version and key metadata cache, in
+      milliseconds. This affects getKeyVersion and getMetadata.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.current.key.cache.timeout.ms</name>
+    <value>30000</value>
+    <description>
+      Expiry time for the KMS current key cache, in milliseconds. This
+      affects getCurrentKey operations.
+    </description>
+  </property>
+
+  <!-- KMS Audit -->
+
+  <property>
+    <name>hadoop.kms.audit.aggregation.window.ms</name>
+    <value>10000</value>
+    <description>
+      Duplicate audit log events within the aggregation window (specified in
+      ms) are quashed to reduce log traffic. A single message for aggregated
+      events is printed at the end of the window, along with a count of the
+      number of aggregated events.
+    </description>
+  </property>
+
+  <!-- KMS Security -->
+
+  <property>
+    <name>hadoop.kms.authentication.type</name>
+    <value>simple</value>
+    <description>
+      Authentication type for the KMS. Can be either &quot;simple&quot;
+      or &quot;kerberos&quot;.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.kerberos.keytab</name>
+    <value>${user.home}/kms.keytab</value>
+    <description>
+      Path to the keytab with credentials for the configured Kerberos principal.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.kerberos.principal</name>
+    <value>HTTP/localhost</value>
+    <description>
+      The Kerberos principal to use for the HTTP endpoint.
+      The principal must start with 'HTTP/' as per the Kerberos HTTP SPNEGO specification.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.kerberos.name.rules</name>
+    <value>DEFAULT</value>
+    <description>
+      Rules used to resolve Kerberos principal names.
+    </description>
+  </property>
+
+  <!-- Authentication cookie signature source -->
+
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider</name>
+    <value>random</value>
+    <description>
+      Indicates how the secret to sign the authentication cookies will be
+      stored. Options are 'random' (default), 'string' and 'zookeeper'.
+      If using a setup with multiple KMS instances, 'zookeeper' should be used.
+    </description>
+  </property>
+
+  <!-- Configuration for 'zookeeper' authentication cookie signature source -->
+
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.path</name>
+    <value>/hadoop-kms/hadoop-auth-signature-secret</value>
+    <description>
+      The Zookeeper ZNode path where the KMS instances will store and retrieve
+      the secret from.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string</name>
+    <value>#HOSTNAME#:#PORT#,...</value>
+    <description>
+      The Zookeeper connection string, a list of hostnames and port comma
+      separated.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.auth.type</name>
+    <value>kerberos</value>
+    <description>
+      The Zookeeper authentication type, 'none' or 'sasl' (Kerberos).
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.keytab</name>
+    <value>/etc/hadoop/conf/kms.keytab</value>
+    <description>
+      The absolute path for the Kerberos keytab with the credentials to
+      connect to Zookeeper.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.principal</name>
+    <value>kms/#HOSTNAME#</value>
+    <description>
+      The Kerberos service principal used to connect to Zookeeper.
+    </description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-test/conf/log4j.properties
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/log4j.properties b/contrib/hawq-docker/centos7-docker/hawq-test/conf/log4j.properties
new file mode 100644
index 0000000..c901ab1
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/log4j.properties
@@ -0,0 +1,291 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+# Null Appender
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Rolling File Appender - cap space usage at 5gb.
+#
+hadoop.log.maxfilesize=256MB
+hadoop.log.maxbackupindex=20
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
+log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this 
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# HDFS block state change log from block manager
+#
+# Uncomment the following to suppress normal block state change
+# messages from BlockManager in NameNode.
+#log4j.logger.BlockStateChange=WARN
+
+#
+#Security appender
+#
+hadoop.security.logger=INFO,NullAppender
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth-${user.name}.audit
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender 
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# Daily Rolling Security appender
+#
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender 
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+#
+# hadoop configuration logging
+#
+
+# Uncomment the following line to turn off configuration deprecation warnings.
+# log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,NullAppender
+hdfs.audit.log.maxfilesize=256MB
+hdfs.audit.log.maxbackupindex=20
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
+log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
+
+#
+# NameNode metrics logging.
+# The default is to retain two namenode-metrics.log files up to 64MB each.
+#
+namenode.metrics.logger=INFO,NullAppender
+log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}
+log4j.additivity.NameNodeMetricsLog=false
+log4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender
+log4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log
+log4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
+log4j.appender.NNMETRICSRFA.MaxBackupIndex=1
+log4j.appender.NNMETRICSRFA.MaxFileSize=64MB
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,NullAppender
+mapred.audit.log.maxfilesize=256MB
+mapred.audit.log.maxbackupindex=20
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}
+log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}
+
+# Custom Logging levels
+
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=DEBUG
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+# AWS SDK & S3A FileSystem
+log4j.logger.com.amazonaws=ERROR
+log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
+log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+#
+# Job Summary Appender 
+#
+# Use following logger to send summary to separate file defined by 
+# hadoop.mapreduce.jobsummary.log.file :
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+# 
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+hadoop.mapreduce.jobsummary.log.maxfilesize=256MB
+hadoop.mapreduce.jobsummary.log.maxbackupindex=20
+log4j.appender.JSA=org.apache.log4j.RollingFileAppender
+log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
+log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize}
+log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex}
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
+log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
+
+#
+# Yarn ResourceManager Application Summary Log 
+#
+# Set the ResourceManager summary log filename
+yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
+# Set the ResourceManager summary log level and appender
+yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
+#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
+
+# To enable AppSummaryLogging for the RM, 
+# set yarn.server.resourcemanager.appsummary.logger to 
+# <LEVEL>,RMSUMMARY in hadoop-env.sh
+
+# Appender for ResourceManager Application Summary Log
+# Requires the following properties to be set
+#    - hadoop.log.dir (Hadoop Log directory)
+#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
+#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
+
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
+log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
+log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
+log4j.appender.RMSUMMARY.MaxFileSize=256MB
+log4j.appender.RMSUMMARY.MaxBackupIndex=20
+log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
+log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+
+# HS audit log configs
+#mapreduce.hs.audit.logger=INFO,HSAUDIT
+#log4j.logger.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=${mapreduce.hs.audit.logger}
+#log4j.additivity.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=false
+#log4j.appender.HSAUDIT=org.apache.log4j.DailyRollingFileAppender
+#log4j.appender.HSAUDIT.File=${hadoop.log.dir}/hs-audit.log
+#log4j.appender.HSAUDIT.layout=org.apache.log4j.PatternLayout
+#log4j.appender.HSAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+#log4j.appender.HSAUDIT.DatePattern=.yyyy-MM-dd
+
+# Http Server Request Logs
+#log4j.logger.http.requests.namenode=INFO,namenoderequestlog
+#log4j.appender.namenoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.namenoderequestlog.Filename=${hadoop.log.dir}/jetty-namenode-yyyy_mm_dd.log
+#log4j.appender.namenoderequestlog.RetainDays=3
+
+#log4j.logger.http.requests.datanode=INFO,datanoderequestlog
+#log4j.appender.datanoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.datanoderequestlog.Filename=${hadoop.log.dir}/jetty-datanode-yyyy_mm_dd.log
+#log4j.appender.datanoderequestlog.RetainDays=3
+
+#log4j.logger.http.requests.resourcemanager=INFO,resourcemanagerrequestlog
+#log4j.appender.resourcemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.resourcemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-resourcemanager-yyyy_mm_dd.log
+#log4j.appender.resourcemanagerrequestlog.RetainDays=3
+
+#log4j.logger.http.requests.jobhistory=INFO,jobhistoryrequestlog
+#log4j.appender.jobhistoryrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.jobhistoryrequestlog.Filename=${hadoop.log.dir}/jetty-jobhistory-yyyy_mm_dd.log
+#log4j.appender.jobhistoryrequestlog.RetainDays=3
+
+#log4j.logger.http.requests.nodemanager=INFO,nodemanagerrequestlog
+#log4j.appender.nodemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
+#log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-yyyy_mm_dd.log
+#log4j.appender.nodemanagerrequestlog.RetainDays=3
+
+# Appender for viewing information for errors and warnings
+yarn.ewma.cleanupInterval=300
+yarn.ewma.messageAgeLimitSeconds=86400
+yarn.ewma.maxUniqueMessages=250
+log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
+log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
+log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
+log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-env.cmd
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-env.cmd b/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-env.cmd
new file mode 100644
index 0000000..0d39526
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-env.cmd
@@ -0,0 +1,20 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements.  See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License.  You may obtain a copy of the License at
+@rem
+@rem     http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+set HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000
+
+set HADOOP_MAPRED_ROOT_LOGGER=%HADOOP_LOGLEVEL%,RFA
+

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-env.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-env.sh b/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-env.sh
new file mode 100644
index 0000000..6be1e27
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-env.sh
@@ -0,0 +1,27 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+
+export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000
+
+export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
+
+#export HADOOP_JOB_HISTORYSERVER_OPTS=
+#export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.
+#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.
+#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.
+#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default
+#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-queues.xml.template
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-queues.xml.template b/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-queues.xml.template
new file mode 100644
index 0000000..ce6cd20
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-queues.xml.template
@@ -0,0 +1,92 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<!-- This is the template for queue configuration. The format supports nesting of
+     queues within queues - a feature called hierarchical queues. All queues are
+     defined within the 'queues' tag which is the top level element for this
+     XML document. The queue acls configured here for different queues are
+     checked for authorization only if the configuration property
+     mapreduce.cluster.acls.enabled is set to true. -->
+<queues>
+
+  <!-- Configuration for a queue is specified by defining a 'queue' element. -->
+  <queue>
+
+    <!-- Name of a queue. Queue name cannot contain a ':'  -->
+    <name>default</name>
+
+    <!-- properties for a queue, typically used by schedulers,
+    can be defined here -->
+    <properties>
+    </properties>
+
+	<!-- State of the queue. If running, the queue will accept new jobs.
+         If stopped, the queue will not accept new jobs. -->
+    <state>running</state>
+
+    <!-- Specifies the ACLs to check for submitting jobs to this queue.
+         If set to '*', it allows all users to submit jobs to the queue.
+         If set to ' '(i.e. space), no user will be allowed to do this
+         operation. The default value for any queue acl is ' '.
+         For specifying a list of users and groups the format to use is
+         user1,user2 group1,group2
+
+         It is only used if authorization is enabled in Map/Reduce by setting
+         the configuration property mapreduce.cluster.acls.enabled to true.
+
+         Irrespective of this ACL configuration, the user who started the
+         cluster and cluster administrators configured via
+         mapreduce.cluster.administrators can do this operation. -->
+    <acl-submit-job> </acl-submit-job>
+
+    <!-- Specifies the ACLs to check for viewing and modifying jobs in this
+         queue. Modifications include killing jobs, tasks of jobs or changing
+         priorities.
+         If set to '*', it allows all users to view, modify jobs of the queue.
+         If set to ' '(i.e. space), no user will be allowed to do this
+         operation.
+         For specifying a list of users and groups the format to use is
+         user1,user2 group1,group2
+
+         It is only used if authorization is enabled in Map/Reduce by setting
+         the configuration property mapreduce.cluster.acls.enabled to true.
+
+         Irrespective of this ACL configuration, the user who started the
+         cluster  and cluster administrators configured via
+         mapreduce.cluster.administrators can do the above operations on all
+         the jobs in all the queues. The job owner can do all the above
+         operations on his/her job irrespective of this ACL configuration. -->
+    <acl-administer-jobs> </acl-administer-jobs>
+  </queue>
+
+  <!-- Here is a sample of a hierarchical queue configuration
+       where q2 is a child of q1. In this example, q2 is a leaf level
+       queue as it has no queues configured within it. Currently, ACLs
+       and state are only supported for the leaf level queues.
+       Note also the usage of properties for the queue q2.
+  <queue>
+    <name>q1</name>
+    <queue>
+      <name>q2</name>
+      <properties>
+        <property key="capacity" value="20"/>
+        <property key="user-limit" value="30"/>
+      </properties>
+    </queue>
+  </queue>
+ -->
+</queues>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-site.xml.template
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-site.xml.template b/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-site.xml.template
new file mode 100644
index 0000000..761c352
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-site.xml.template
@@ -0,0 +1,21 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-test/conf/slaves
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/slaves b/contrib/hawq-docker/centos7-docker/hawq-test/conf/slaves
new file mode 100644
index 0000000..2fbb50c
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/slaves
@@ -0,0 +1 @@
+localhost

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-test/conf/ssl-client.xml.example
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/ssl-client.xml.example b/contrib/hawq-docker/centos7-docker/hawq-test/conf/ssl-client.xml.example
new file mode 100644
index 0000000..a50dce4
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/ssl-client.xml.example
@@ -0,0 +1,80 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+
+<property>
+  <name>ssl.client.truststore.location</name>
+  <value></value>
+  <description>Truststore to be used by clients like distcp. Must be
+  specified.
+  </description>
+</property>
+
+<property>
+  <name>ssl.client.truststore.password</name>
+  <value></value>
+  <description>Optional. Default value is "".
+  </description>
+</property>
+
+<property>
+  <name>ssl.client.truststore.type</name>
+  <value>jks</value>
+  <description>Optional. The keystore file format, default value is "jks".
+  </description>
+</property>
+
+<property>
+  <name>ssl.client.truststore.reload.interval</name>
+  <value>10000</value>
+  <description>Truststore reload check interval, in milliseconds.
+  Default value is 10000 (10 seconds).
+  </description>
+</property>
+
+<property>
+  <name>ssl.client.keystore.location</name>
+  <value></value>
+  <description>Keystore to be used by clients like distcp. Must be
+  specified.
+  </description>
+</property>
+
+<property>
+  <name>ssl.client.keystore.password</name>
+  <value></value>
+  <description>Optional. Default value is "".
+  </description>
+</property>
+
+<property>
+  <name>ssl.client.keystore.keypassword</name>
+  <value></value>
+  <description>Optional. Default value is "".
+  </description>
+</property>
+
+<property>
+  <name>ssl.client.keystore.type</name>
+  <value>jks</value>
+  <description>Optional. The keystore file format, default value is "jks".
+  </description>
+</property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-test/conf/ssl-server.xml.example
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/ssl-server.xml.example b/contrib/hawq-docker/centos7-docker/hawq-test/conf/ssl-server.xml.example
new file mode 100644
index 0000000..02d300c
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/ssl-server.xml.example
@@ -0,0 +1,78 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+
+<property>
+  <name>ssl.server.truststore.location</name>
+  <value></value>
+  <description>Truststore to be used by NN and DN. Must be specified.
+  </description>
+</property>
+
+<property>
+  <name>ssl.server.truststore.password</name>
+  <value></value>
+  <description>Optional. Default value is "".
+  </description>
+</property>
+
+<property>
+  <name>ssl.server.truststore.type</name>
+  <value>jks</value>
+  <description>Optional. The keystore file format, default value is "jks".
+  </description>
+</property>
+
+<property>
+  <name>ssl.server.truststore.reload.interval</name>
+  <value>10000</value>
+  <description>Truststore reload check interval, in milliseconds.
+  Default value is 10000 (10 seconds).
+  </description>
+</property>
+
+<property>
+  <name>ssl.server.keystore.location</name>
+  <value></value>
+  <description>Keystore to be used by NN and DN. Must be specified.
+  </description>
+</property>
+
+<property>
+  <name>ssl.server.keystore.password</name>
+  <value></value>
+  <description>Must be specified.
+  </description>
+</property>
+
+<property>
+  <name>ssl.server.keystore.keypassword</name>
+  <value></value>
+  <description>Must be specified.
+  </description>
+</property>
+
+<property>
+  <name>ssl.server.keystore.type</name>
+  <value>jks</value>
+  <description>Optional. The keystore file format, default value is "jks".
+  </description>
+</property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-test/conf/yarn-env.cmd
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/yarn-env.cmd b/contrib/hawq-docker/centos7-docker/hawq-test/conf/yarn-env.cmd
new file mode 100644
index 0000000..74da35b
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/yarn-env.cmd
@@ -0,0 +1,60 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements.  See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License.  You may obtain a copy of the License at
+@rem
+@rem     http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem User for YARN daemons
+if not defined HADOOP_YARN_USER (
+  set HADOOP_YARN_USER=%yarn%
+)
+
+if not defined YARN_CONF_DIR (
+  set YARN_CONF_DIR=%HADOOP_YARN_HOME%\conf
+)
+
+if defined YARN_HEAPSIZE (
+  @rem echo run with Java heapsize %YARN_HEAPSIZE%
+  set JAVA_HEAP_MAX=-Xmx%YARN_HEAPSIZE%m
+)
+
+if not defined YARN_LOG_DIR (
+  set YARN_LOG_DIR=%HADOOP_YARN_HOME%\logs
+)
+
+if not defined YARN_LOGFILE (
+  set YARN_LOGFILE=yarn.log
+)
+
+@rem default policy file for service-level authorization
+if not defined YARN_POLICYFILE (
+  set YARN_POLICYFILE=hadoop-policy.xml
+)
+
+if not defined YARN_ROOT_LOGGER (
+  set YARN_ROOT_LOGGER=%HADOOP_LOGLEVEL%,console
+)
+
+set YARN_OPTS=%YARN_OPTS% -Dhadoop.log.dir=%YARN_LOG_DIR%
+set YARN_OPTS=%YARN_OPTS% -Dyarn.log.dir=%YARN_LOG_DIR%
+set YARN_OPTS=%YARN_OPTS% -Dhadoop.log.file=%YARN_LOGFILE%
+set YARN_OPTS=%YARN_OPTS% -Dyarn.log.file=%YARN_LOGFILE%
+set YARN_OPTS=%YARN_OPTS% -Dyarn.home.dir=%HADOOP_YARN_HOME%
+set YARN_OPTS=%YARN_OPTS% -Dyarn.id.str=%YARN_IDENT_STRING%
+set YARN_OPTS=%YARN_OPTS% -Dhadoop.home.dir=%HADOOP_YARN_HOME%
+set YARN_OPTS=%YARN_OPTS% -Dhadoop.root.logger=%YARN_ROOT_LOGGER%
+set YARN_OPTS=%YARN_OPTS% -Dyarn.root.logger=%YARN_ROOT_LOGGER%
+if defined JAVA_LIBRARY_PATH (
+  set YARN_OPTS=%YARN_OPTS% -Djava.library.path=%JAVA_LIBRARY_PATH%
+)
+set YARN_OPTS=%YARN_OPTS% -Dyarn.policy.file=%YARN_POLICYFILE%
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-test/entrypoint.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/entrypoint.sh b/contrib/hawq-docker/centos7-docker/hawq-test/entrypoint.sh
new file mode 100755
index 0000000..abdc508
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/entrypoint.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+if [ -z "${NAMENODE}" ]; then
+  export NAMENODE=${HOSTNAME}
+fi
+
+if [ ! -f /etc/profile.d/hadoop.sh ]; then
+  echo '#!/bin/bash' | sudo tee /etc/profile.d/hadoop.sh
+  echo "export NAMENODE=${NAMENODE}" | sudo tee -a /etc/profile.d/hadoop.sh
+  sudo chmod a+x /etc/profile.d/hadoop.sh
+fi
+
+sudo start-hdfs.sh
+sudo sysctl -p
+
+exec "$@"

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos7-docker/hawq-test/start-hdfs.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/start-hdfs.sh b/contrib/hawq-docker/centos7-docker/hawq-test/start-hdfs.sh
new file mode 100755
index 0000000..f39200d
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/start-hdfs.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+/usr/sbin/sshd
+
+if [ -f /etc/profile.d/hadoop.sh ]; then
+  . /etc/profile.d/hadoop.sh
+fi
+
+if [ "${NAMENODE}" == "${HOSTNAME}" ]; then
+  if [ ! -d /tmp/hdfs/name/current ]; then
+    su -l hdfs -c "hdfs namenode -format"
+  fi
+  
+  if [ -z "`ps aux | grep org.apache.hadoop.hdfs.server.namenode.NameNode | grep -v grep`" ]; then
+    su -l hdfs -c "hadoop-daemon.sh start namenode"
+  fi
+else
+  if [ -z "`ps aux | grep org.apache.hadoop.hdfs.server.datanode.DataNode | grep -v grep`" ]; then
+    su -l hdfs -c "hadoop-daemon.sh start datanode"
+  fi
+fi
+



[10/50] [abbrv] incubator-hawq git commit: HAWQ-1256. Enhance libcurl connection to Ranger Plugin Service, keep it as a long-lived connection at the session level

Posted by es...@apache.org.
HAWQ-1256. Enhance libcurl connection to Ranger Plugin Service, keep it as a long-lived connection at the session level


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/ad718734
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/ad718734
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/ad718734

Branch: refs/heads/2.1.0.0-incubating
Commit: ad718734898da2128ad47689243cc08043035573
Parents: 2f5910f
Author: stanlyxiang <st...@gmail.com>
Authored: Tue Jan 10 11:08:01 2017 +0800
Committer: Wen Lin <wl...@pivotal.io>
Committed: Wed Jan 11 15:51:32 2017 +0800

----------------------------------------------------------------------
 src/backend/libpq/rangerrest.c | 78 ++++++++++++++++---------------------
 src/backend/tcop/postgres.c    | 41 +++++++++++++++++++
 src/include/utils/rangerrest.h |  7 +++-
 3 files changed, 81 insertions(+), 45 deletions(-)
----------------------------------------------------------------------
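
The gist of the change: each backend session now owns a single libcurl easy handle that is created once, reset before every privilege-check request, and destroyed only at session exit, so libcurl can keep the connection to the Ranger Plugin Service alive across checks. Below is a minimal, standalone C sketch of that pattern under stated assumptions -- the endpoint URL, port, and request bodies are hypothetical placeholders rather than the backend's actual configuration, and plain return codes stand in for elog-style error handling.

/* session_curl.c -- hedged illustration of a session-lived curl handle
 * (build with: cc session_curl.c -lcurl) */
#include <curl/curl.h>

static CURL *session_handle = NULL;          /* lives for the whole session */

/* create the handle exactly once, when the session starts */
static int session_curl_init(void)
{
    if (curl_global_init(CURL_GLOBAL_ALL) != CURLE_OK)
        return -1;
    session_handle = curl_easy_init();
    return (session_handle != NULL) ? 0 : -1;
}

/* issue one JSON POST, reusing the same handle (and, when the server
 * allows keep-alive, the same underlying connection) */
static int session_curl_post(const char *url, const char *json_body)
{
    /* drop options set by the previous request; live connections,
     * the DNS cache and session caches are kept by curl_easy_reset() */
    curl_easy_reset(session_handle);

    struct curl_slist *headers =
        curl_slist_append(NULL, "Content-Type: application/json");
    curl_easy_setopt(session_handle, CURLOPT_URL, url);
    curl_easy_setopt(session_handle, CURLOPT_TIMEOUT, 30L);
    curl_easy_setopt(session_handle, CURLOPT_POSTFIELDS, json_body);
    curl_easy_setopt(session_handle, CURLOPT_HTTPHEADER, headers);

    CURLcode rc = curl_easy_perform(session_handle);
    curl_slist_free_all(headers);
    return (rc == CURLE_OK) ? 0 : -1;
}

/* run once when the session ends, analogous to an exit callback */
static void session_curl_finalize(void)
{
    if (session_handle != NULL)
        curl_easy_cleanup(session_handle);
    curl_global_cleanup();
}

int main(void)
{
    if (session_curl_init() != 0)
        return 1;
    /* two requests over one handle; hypothetical endpoint */
    session_curl_post("http://localhost:8432/rps", "{\"ping\":1}");
    session_curl_post("http://localhost:8432/rps", "{\"ping\":2}");
    session_curl_finalize();
    return 0;
}

Reusing one handle this way avoids a libcurl global init/cleanup and a fresh connection per ACL check, which is the point of moving the setup and teardown out of call_ranger_rest and into the session lifecycle.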


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/ad718734/src/backend/libpq/rangerrest.c
----------------------------------------------------------------------
diff --git a/src/backend/libpq/rangerrest.c b/src/backend/libpq/rangerrest.c
index 56d30b5..fd8937a 100644
--- a/src/backend/libpq/rangerrest.c
+++ b/src/backend/libpq/rangerrest.c
@@ -71,9 +71,11 @@ static void getClientIP(char *remote_host)
 RangerACLResult parse_ranger_response(char* buffer)
 {
 	if (buffer == NULL || strlen(buffer) == 0)
+	{
 		return RANGERCHECK_UNKNOWN;
+	}
 
-	elog(LOG, "read from Ranger Restful API: %s", buffer);
+	elog(DEBUG3, "parse ranger restful response content : %s", buffer);
 
 	struct json_object *response = json_tokener_parse(buffer);
 	if (response == NULL) 
@@ -90,7 +92,7 @@ RangerACLResult parse_ranger_response(char* buffer)
 	}
 
 	int arraylen = json_object_array_length(accessObj);
-	elog(LOG, "Array Length: %d",arraylen);
+	elog(DEBUG3, "parse ranger response result array length: %d",arraylen);
 
 	// here should return which table's acl check failed in future.
 	for (int i=0; i< arraylen; i++){
@@ -161,7 +163,7 @@ json_object *create_ranger_request_json(List *args)
 		AclObjectKind kind = arg_ptr->kind;
 		char* object = arg_ptr->object;
 		Assert(user != NULL && object != NULL && privilege != NULL && arg_ptr->isAll);
-		elog(LOG, "build json for ranger request, user:%s, kind:%s, object:%s",
+		elog(DEBUG3, "build json for ranger restful request, user:%s, kind:%s, object:%s",
 				user, AclObjectKindStr[kind], object);
 
 		json_object *jelement = json_object_new_object();
@@ -281,29 +283,29 @@ static size_t write_callback(char *contents, size_t size, size_t nitems,
 	CURL_HANDLE curl = (CURL_HANDLE) userp;
 	Assert(curl != NULL);
 
-	if (curl->response.buffer == NULL) 
+	elog(DEBUG3, "ranger restful response size is %d. response buffer size is %d.", curl->response.response_size, curl->response.buffer_size);
+	int original_size = curl->response.buffer_size;
+	while(curl->response.response_size + realsize >= curl->response.buffer_size)
 	{
-		curl->response.buffer = palloc0(realsize + 1);
+		/*double the buffer size if the buffer is not enough.*/
+		curl->response.buffer_size = curl->response.buffer_size * 2;
 	}
-	else 
+	if(original_size < curl->response.buffer_size)
 	{
-		/*Note:*/
-		/*our repalloc is not same as realloc, repalloc's first param(buffer) can not be NULL*/
-		curl->response.buffer = repalloc(curl->response.buffer, curl->response.size + realsize + 1);
+		/* our repalloc is not same as realloc, repalloc's first param(buffer) can not be NULL */
+		curl->response.buffer = repalloc(curl->response.buffer, curl->response.buffer_size);
 	}
-
+	elog(DEBUG3, "ranger restful response size is %d. response buffer size is %d.", curl->response.response_size, curl->response.buffer_size);
 	if (curl->response.buffer == NULL)
 	{
 		/* out of memory! */
 		elog(WARNING, "not enough memory for Ranger response");
 		return 0;
 	}
-
-	memcpy(curl->response.buffer + curl->response.size, contents, realsize);
-	curl->response.size += realsize;
-	curl->response.buffer[curl->response.size] = '\0';
-	elog(LOG, "read from Ranger Restful API: %s", curl->response.buffer);
-
+	memcpy(curl->response.buffer + curl->response.response_size, contents, realsize);
+	elog(DEBUG3, "read from ranger restful response: %s", curl->response.buffer);
+	curl->response.response_size += realsize;
+	curl->response.buffer[curl->response.response_size] = '\0';
 	return realsize;
 }
 
@@ -316,15 +318,14 @@ int call_ranger_rest(CURL_HANDLE curl_handle, const char* request)
 	CURLcode res;
 	Assert(request != NULL);
 
-	curl_global_init(CURL_GLOBAL_ALL);
-
-	/* init the curl session */
-	curl_handle->curl_handle = curl_easy_init();
-	if (curl_handle->curl_handle == NULL)
-	{
-		goto _exit;
-	}
-
+	/*
+	 * Re-initializes all options previously set on a specified CURL handle
+	 * to the default values. This puts back the handle to the same state as
+	 * it was in when it was just created with curl_easy_init. It does not
+	 * change the following information kept in the handle: live connections,
+	 * the Session ID cache, the DNS cache, the cookies and shares.
+	 */
+	curl_easy_reset(curl_handle->curl_handle);
 	/* timeout: hard-coded temporarily and maybe should be a guc in future */
 	curl_easy_setopt(curl_handle->curl_handle, CURLOPT_TIMEOUT, 30L);
 
@@ -364,19 +365,10 @@ int call_ranger_rest(CURL_HANDLE curl_handle, const char* request)
 	else
 	{
 		ret = 0;
-		elog(LOG, "%d bytes retrieved from Ranger Restful API.",
-			curl_handle->response.size);
-	}
-
-_exit:
-	/* cleanup curl stuff */
-	if (curl_handle->curl_handle)
-	{
-		curl_easy_cleanup(curl_handle->curl_handle);
+		elog(DEBUG3, "retrieved %d bytes from ranger restful response.",
+			curl_handle->response.response_size);
 	}
 
-	/* we're done with libcurl, so clean it up */
-	curl_global_cleanup();
 	return ret;
 }
 
@@ -388,13 +380,11 @@ int check_privilege_from_ranger(List *arg_list)
 	json_object* jrequest = create_ranger_request_json(arg_list);
 	Assert(jrequest != NULL);
 	const char *request = json_object_to_json_string(jrequest);
-	elog(LOG, "Send JSON request to Ranger: %s", request);
+	elog(DEBUG3, "send json request to ranger : %s", request);
 	Assert(request != NULL);
-	struct curl_context_t curl_context;
-	memset(&curl_context, 0, sizeof(struct curl_context_t));
 
 	/* call GET method to send request*/
-	if (call_ranger_rest(&curl_context, request) < 0)
+	if (call_ranger_rest(&curl_context_ranger, request) < 0)
 	{
 		return RANGERCHECK_NO_PRIV;
 	}
@@ -403,11 +393,11 @@ int check_privilege_from_ranger(List *arg_list)
 	json_object_put(jrequest);
 
 	/* parse the JSON-format result */
-	RangerACLResult ret = parse_ranger_response(curl_context.response.buffer);
-	/* free response buffer */
-	if (curl_context.response.buffer != NULL)
+	RangerACLResult ret = parse_ranger_response(curl_context_ranger.response.buffer);
+	if (curl_context_ranger.response.buffer != NULL)
 	{
-		pfree(curl_context.response.buffer);
+		/* reset response size to reuse the buffer. */
+		curl_context_ranger.response.response_size = 0;
 	}
 
 	return ret;
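
As a side note on the write_callback change above: the response buffer now grows geometrically (doubling) instead of being reallocated for every chunk libcurl delivers. The self-contained sketch below shows the same doubling strategy; it substitutes malloc/realloc/free for the backend's palloc0/repalloc/pfree and assumes a 1024-byte initial capacity (mirroring CURL_RES_BUFFER_SIZE), so it illustrates the technique rather than reproducing the backend code.

/* grow_buffer.c -- hedged illustration of a doubling response buffer for a
 * libcurl write callback (build with: cc grow_buffer.c -lcurl) */
#include <stdlib.h>
#include <string.h>
#include <curl/curl.h>

struct resp_buf {
    char   *buffer;          /* accumulated response body */
    size_t  buffer_size;     /* allocated capacity */
    size_t  response_size;   /* bytes received so far */
};

/* CURLOPT_WRITEFUNCTION callback: append one chunk, doubling the capacity
 * until the chunk plus a trailing NUL fits */
static size_t write_cb(char *contents, size_t size, size_t nitems, void *userp)
{
    struct resp_buf *resp = (struct resp_buf *) userp;
    size_t realsize = size * nitems;
    size_t needed = resp->response_size + realsize + 1;

    size_t newcap = resp->buffer_size;
    while (newcap < needed)
        newcap *= 2;

    if (newcap > resp->buffer_size) {
        char *p = realloc(resp->buffer, newcap);
        if (p == NULL)
            return 0;        /* a short return tells libcurl to abort the transfer */
        resp->buffer = p;
        resp->buffer_size = newcap;
    }

    memcpy(resp->buffer + resp->response_size, contents, realsize);
    resp->response_size += realsize;
    resp->buffer[resp->response_size] = '\0';
    return realsize;
}

int main(void)
{
    curl_global_init(CURL_GLOBAL_ALL);
    CURL *h = curl_easy_init();
    if (h == NULL)
        return 1;

    struct resp_buf resp = { calloc(1, 1024), 1024, 0 };  /* assumed initial size */
    if (resp.buffer == NULL)
        return 1;
    curl_easy_setopt(h, CURLOPT_WRITEFUNCTION, write_cb);
    curl_easy_setopt(h, CURLOPT_WRITEDATA, (void *) &resp);
    curl_easy_setopt(h, CURLOPT_URL, "http://localhost:8080/");  /* hypothetical */
    curl_easy_perform(h);

    free(resp.buffer);
    curl_easy_cleanup(h);
    curl_global_cleanup();
    return 0;
}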

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/ad718734/src/backend/tcop/postgres.c
----------------------------------------------------------------------
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 5a8327e..c8d7e33 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -114,6 +114,8 @@
 
 #include "cdb/cdbinmemheapam.h"
 
+#include "utils/rangerrest.h"
+
 #include "resourcemanager/dynrm.h"
 #include "resourcemanager/envswitch.h"
 #include "resourcemanager/communication/rmcomm_QD2RM.h"
@@ -133,6 +135,7 @@ extern char *optarg;
 extern char *savedSeqServerHost;
 extern int savedSeqServerPort;
 
+struct curl_context_t curl_context_ranger;
 /* ----------------
  *		global variables
  * ----------------
@@ -266,6 +269,7 @@ static void log_disconnections(int code, Datum arg);
 static bool renice_current_process(int nice_level);
 static int getSlaveHostNumber(FILE *fp);
 static bool CheckSlaveFile();
+static void curl_finalize(int code, Datum arg);
 
 /*saved interrupt global variable for client_read_xxx functions*/
 static bool SavedImmediateInterruptOK = false;
@@ -4626,6 +4630,25 @@ PostgresMain(int argc, char *argv[], const char *username)
 	if (!ignore_till_sync)
 		send_ready_for_query = true;	/* initially, or after error */
 
+	/* for enabling ranger */
+	if (AmIMaster() && enable_ranger && !curl_context_ranger.hasInited)
+	{
+		memset(&curl_context_ranger, 0, sizeof(curl_context_t));
+		curl_global_init(CURL_GLOBAL_ALL);
+		/* init the curl session */
+		curl_context_ranger.curl_handle = curl_easy_init();
+		if (curl_context_ranger.curl_handle == NULL) {
+			/* cleanup curl stuff */
+			/* no need to cleanup curl_handle since it's null. just cleanup curl global.*/
+			curl_global_cleanup();
+			elog(ERROR, "initialize global curl context failed.");
+		}
+		curl_context_ranger.hasInited = true;
+		curl_context_ranger.response.buffer = palloc0(CURL_RES_BUFFER_SIZE);
+		curl_context_ranger.response.buffer_size = CURL_RES_BUFFER_SIZE;
+		elog(DEBUG3, "initialize global curl context for privileges check.");
+		on_proc_exit(curl_finalize, 0);
+	}
 	/*
 	 * Non-error queries loop here.
 	 */
@@ -5314,6 +5337,24 @@ PostgresMain(int argc, char *argv[], const char *username)
 	return 1;					/* keep compiler quiet */
 }
 
+static void
+curl_finalize(int code, Datum arg __MAYBE_UNUSED)
+{
+	if (AmIMaster() && curl_context_ranger.hasInited)
+	{
+		if (curl_context_ranger.response.buffer != NULL) {
+			pfree(curl_context_ranger.response.buffer);
+		}
+		/* cleanup curl stuff */
+		if (curl_context_ranger.curl_handle) {
+			curl_easy_cleanup(curl_context_ranger.curl_handle);
+		}
+		/* we're done with libcurl, so clean it up */
+		curl_global_cleanup();
+		curl_context_ranger.hasInited = false;
+		elog(DEBUG3, "finalize the global struct for curl handle context.");
+	}
+}
 
 /*
  * Obtain platform stack depth limit (in bytes)

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/ad718734/src/include/utils/rangerrest.h
----------------------------------------------------------------------
diff --git a/src/include/utils/rangerrest.h b/src/include/utils/rangerrest.h
index 692c832..f67d8e5 100644
--- a/src/include/utils/rangerrest.h
+++ b/src/include/utils/rangerrest.h
@@ -37,6 +37,7 @@
 #include "tcop/tcopprot.h"
 
 #define HOST_BUFFER_SIZE 1025
+#define CURL_RES_BUFFER_SIZE 1024
 
 typedef enum
 {
@@ -59,10 +60,13 @@ typedef struct curl_context_t
   struct
   {
     char* buffer;
-    int size;
+    int response_size;
+    int buffer_size;
   } response;
 
   char* last_http_reponse;
+
+  bool hasInited;
 } curl_context_t;
 
 typedef curl_context_t* CURL_HANDLE;
@@ -94,5 +98,6 @@ RangerACLResult parse_ranger_response(char *);
 json_object *create_ranger_request_json(List *);
 int call_ranger_rest(CURL_HANDLE curl_handle, const char *request);
 extern int check_privilege_from_ranger(List *);
+extern struct curl_context_t curl_context_ranger;
 
 #endif


[31/50] [abbrv] incubator-hawq git commit: HAWQ-1203. Ranger Plugin Service Implementation. (with contributions by Lav Jain and Leslie Chang) (close #1092)

Posted by es...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListSchemasTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListSchemasTest.java b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListSchemasTest.java
new file mode 100644
index 0000000..94372aa
--- /dev/null
+++ b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListSchemasTest.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.admin;
+
+import com.google.common.collect.Sets;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Set;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.Arrays;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class ListSchemasTest extends LookupTestBase {
+
+    private static final Set<String> DEFAULT_SCHEMAS = Sets.newHashSet("public");
+    private static final Set<String> EAST_SCHEMAS = Sets.newHashSet("common", "japan", "public");
+    private static final Set<String> WEST_SCHEMAS = Sets.newHashSet("common", "france", "jamaica", "public");
+    private static final Set<String> ALL_SCHEMAS = Sets.newHashSet("common", "japan", "france", "jamaica", "public");
+
+    private Map<String, List<String>> resources;
+
+    @Before
+    public void setUp() {
+        resources = new HashMap<>();
+    }
+
+    @Test
+    public void testListSchema_NoResources() throws Exception {
+        resources.put("database", Arrays.asList("noschema_db"));
+        List<String> result = service.lookupResource(getContext("schema", "*", resources));
+        assertEquals(DEFAULT_SCHEMAS.size(), result.size());
+        assertEquals(DEFAULT_SCHEMAS, Sets.newHashSet(result));
+    }
+
+    @Test
+    public void testListSchemas_SingleDb_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        List<String> result = service.lookupResource(getContext("schema", "*", resources));
+        assertEquals(EAST_SCHEMAS.size(), result.size());
+        assertEquals(EAST_SCHEMAS, Sets.newHashSet(result));
+    }
+
+    @Test
+    public void testListSchemas_TwoDb_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        List<String> result = service.lookupResource(getContext("schema", "*", resources));
+        assertEquals(ALL_SCHEMAS.size(), result.size());
+        assertEquals(ALL_SCHEMAS, Sets.newHashSet(result));
+    }
+
+    @Test
+    public void testListSchemas_AllDb_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("schema", "*", resources));
+        assertEquals(ALL_SCHEMAS.size(), result.size());
+        assertEquals(ALL_SCHEMAS, Sets.newHashSet(result));
+    }
+
+    @Test
+    public void testListSchemas_SingleDb_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        List<String> result = service.lookupResource(getContext("schema", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListSchemas_TwoDb_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        List<String> result = service.lookupResource(getContext("schema", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListSchemas_AllDb_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("schema", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListSchemas_SingleDb_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        List<String> result = service.lookupResource(getContext("schema", "ja", resources));
+        assertEquals(1, result.size());
+        assertEquals("japan", result.get(0));
+    }
+
+    @Test
+    public void testListSchemas_TwoDb_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        List<String> result = service.lookupResource(getContext("schema", "ja", resources));
+        assertEquals(2, result.size());
+        assertEquals(Sets.newHashSet("japan", "jamaica"), Sets.newHashSet(result));
+    }
+
+    @Test
+    public void testListSchemas_AllDb_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("schema", "ja", resources));
+        assertEquals(2, result.size());
+        assertEquals(Sets.newHashSet("japan", "jamaica"), Sets.newHashSet(result));
+    }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListSequencesTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListSequencesTest.java b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListSequencesTest.java
new file mode 100644
index 0000000..0c601c2
--- /dev/null
+++ b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListSequencesTest.java
@@ -0,0 +1,250 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.admin;
+
+import com.google.common.collect.Sets;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.Arrays;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class ListSequencesTest extends LookupTestBase {
+
+    private Map<String, List<String>> resources;
+
+    @Before
+    public void setUp() {
+        resources = new HashMap<>();
+    }
+
+    @Test
+    public void testListSequences_NoSchemaDb_AllSchemas_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("noschema_db"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("sequence", "*", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListSequences_SingleDb_SingleSchema_AllFilter_NoSequences() throws Exception {
+        resources.put("database", Arrays.asList("west"));
+        resources.put("schema", Arrays.asList("jamaica"));
+        List<String> result = service.lookupResource(getContext("sequence", "*", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListSequences_SingleDb_SingleSchema_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("japan"));
+        List<String> result = service.lookupResource(getContext("sequence", "*", resources));
+        assertEquals(2, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("water", "sake")));
+    }
+
+    @Test
+    public void testListSequences_SingleDb_TwoSchemas_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("common", "japan"));
+        List<String> result = service.lookupResource(getContext("sequence", "*", resources));
+        assertEquals(3, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("water", "sprite", "sake")));
+    }
+
+    @Test
+    public void testListSequences_SingleDb_AllSchemas_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("sequence", "*", resources));
+        assertEquals(3, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("water", "sprite", "sake")));
+    }
+
+    @Test
+    public void testListSequences_TwoDb_CommonSchema_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("common"));
+        List<String> result = service.lookupResource(getContext("sequence", "*", resources));
+        assertEquals(2, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("water", "sprite")));
+    }
+
+    @Test
+    public void testListSequences_TwoDb_SingleSchema_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("japan"));
+        List<String> result = service.lookupResource(getContext("sequence", "*", resources));
+        assertEquals(2, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("water", "sake")));
+    }
+
+    @Test
+    public void testListSequences_TwoDb_AllSchemas_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("sequence", "*", resources));
+        assertEquals(4, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("water", "sprite", "sake", "scotch")));
+    }
+
+    @Test
+    public void testListSequences_AllDb_AllSchemas_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("*"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("sequence", "*", resources));
+        assertEquals(4, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("water", "sprite", "sake", "scotch")));
+    }
+
+    @Test
+    public void testListSequences_SingleDb_SingleSchema_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("japan"));
+        List<String> result = service.lookupResource(getContext("sequence", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListSequences_SingleDb_TwoSchemas_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("common", "japan"));
+        List<String> result = service.lookupResource(getContext("sequence", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListSequences_SingleDb_AllSchemas_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("sequence", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListSequences_TwoDbs_CommonSchema_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("common"));
+        List<String> result = service.lookupResource(getContext("sequence", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListSequences_TwoDbs_SingleSchema_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("japan"));
+        List<String> result = service.lookupResource(getContext("sequence", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListSequences_TwoDbs_AllSchemas_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("sequence", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListSequences_AllDbs_AllSchemas_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("*"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("sequence", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListSequences_SingleDb_SingleSchema_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("japan"));
+        List<String> result = service.lookupResource(getContext("sequence", "s", resources));
+        assertEquals(1, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("sake")));
+    }
+
+    @Test
+    public void testListSequences_SingleDb_TwoSchemas_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("common", "japan"));
+        List<String> result = service.lookupResource(getContext("sequence", "s", resources));
+        assertEquals(2, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("sprite", "sake")));
+    }
+
+    @Test
+    public void testListSequences_SingleDb_AllSchemas_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("sequence", "s", resources));
+        assertEquals(2, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("sprite", "sake")));
+    }
+
+    @Test
+    public void testListSequences_SingleDb_AllSchemas_FilteredPresent2() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("sequence", "w", resources));
+        assertEquals(1, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("water")));
+    }
+
+    @Test
+    public void testListSequences_TwoDbs_CommonSchema_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("common"));
+        List<String> result = service.lookupResource(getContext("sequence", "w", resources));
+        assertEquals(1, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("water")));
+    }
+
+    @Test
+    public void testListSequences_TwoDbs_SingleSchema_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("japan"));
+        List<String> result = service.lookupResource(getContext("sequence", "s", resources));
+        assertEquals(1, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("sake")));
+    }
+
+    @Test
+    public void testListSequences_TwoDbs_AllSchemas_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("sequence", "s", resources));
+        assertEquals(3, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("sprite", "sake", "scotch")));
+    }
+
+    @Test
+    public void testListSequences_AllDbs_AllSchemas_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("*"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("sequence", "s", resources));
+        assertEquals(3, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("sprite", "sake", "scotch")));
+    }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListTablesTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListTablesTest.java b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListTablesTest.java
new file mode 100644
index 0000000..1360cac
--- /dev/null
+++ b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListTablesTest.java
@@ -0,0 +1,250 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.admin;
+
+import com.google.common.collect.Sets;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.Arrays;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class ListTablesTest extends LookupTestBase {
+
+    private Map<String, List<String>> resources;
+
+    @Before
+    public void setUp() {
+        resources = new HashMap<>();
+    }
+
+    @Test
+    public void testListTables_NoSchemaDb_AllSchemas_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("noschema_db"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("table", "*", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListTables_SingleDb_SingleSchema_AllFilter_NoTables() throws Exception {
+        resources.put("database", Arrays.asList("west"));
+        resources.put("schema", Arrays.asList("jamaica"));
+        List<String> result = service.lookupResource(getContext("table", "*", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListTables_SingleDb_SingleSchema_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("japan"));
+        List<String> result = service.lookupResource(getContext("table", "*", resources));
+        assertEquals(2, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("rice", "sushi")));
+    }
+
+    @Test
+    public void testListTables_SingleDb_TwoSchemas_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("common", "japan"));
+        List<String> result = service.lookupResource(getContext("table", "*", resources));
+        assertEquals(3, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("rice", "soup", "sushi")));
+    }
+
+    @Test
+    public void testListTables_SingleDb_AllSchemas_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("table", "*", resources));
+        assertEquals(3, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("rice", "soup", "sushi")));
+    }
+
+    @Test
+    public void testListTables_TwoDb_CommonSchema_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("common"));
+        List<String> result = service.lookupResource(getContext("table", "*", resources));
+        assertEquals(2, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("rice", "soup")));
+    }
+
+    @Test
+    public void testListTables_TwoDb_SingleSchema_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("japan"));
+        List<String> result = service.lookupResource(getContext("table", "*", resources));
+        assertEquals(2, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("rice", "sushi")));
+    }
+
+    @Test
+    public void testListTables_TwoDb_AllSchemas_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("table", "*", resources));
+        assertEquals(4, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("rice", "soup", "sushi", "stew")));
+    }
+
+    @Test
+    public void testListTables_AllDb_AllSchemas_AllFilter() throws Exception {
+        resources.put("database", Arrays.asList("*"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("table", "*", resources));
+        assertEquals(4, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("rice", "soup", "sushi", "stew")));
+    }
+
+    @Test
+    public void testListTables_SingleDb_SingleSchema_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("japan"));
+        List<String> result = service.lookupResource(getContext("table", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListTables_SingleDb_TwoSchemas_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("common", "japan"));
+        List<String> result = service.lookupResource(getContext("table", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListTables_SingleDb_AllSchemas_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("table", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListTables_TwoDbs_CommonSchema_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("common"));
+        List<String> result = service.lookupResource(getContext("table", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListTables_TwoDbs_SingleSchema_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("japan"));
+        List<String> result = service.lookupResource(getContext("table", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListTables_TwoDbs_AllSchemas_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("table", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListTables_AllDbs_AllSchemas_FilteredAbsent() throws Exception {
+        resources.put("database", Arrays.asList("*"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("table", "z", resources));
+        assertTrue(result.isEmpty());
+    }
+
+    @Test
+    public void testListTables_SingleDb_SingleSchema_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("japan"));
+        List<String> result = service.lookupResource(getContext("table", "s", resources));
+        assertEquals(1, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("sushi")));
+    }
+
+    @Test
+    public void testListTables_SingleDb_TwoSchemas_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("common", "japan"));
+        List<String> result = service.lookupResource(getContext("table", "s", resources));
+        assertEquals(2, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("soup", "sushi")));
+    }
+
+    @Test
+    public void testListTables_SingleDb_AllSchemas_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("table", "s", resources));
+        assertEquals(2, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("soup", "sushi")));
+    }
+
+    @Test
+    public void testListTables_SingleDb_AllSchemas_FilteredPresent2() throws Exception {
+        resources.put("database", Arrays.asList("east"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("table", "r", resources));
+        assertEquals(1, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("rice")));
+    }
+
+    @Test
+    public void testListTables_TwoDbs_CommonSchema_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("common"));
+        List<String> result = service.lookupResource(getContext("table", "r", resources));
+        assertEquals(1, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("rice")));
+    }
+
+    @Test
+    public void testListTables_TwoDbs_SingleSchema_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("japan"));
+        List<String> result = service.lookupResource(getContext("table", "s", resources));
+        assertEquals(1, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("sushi")));
+    }
+
+    @Test
+    public void testListTables_TwoDbs_AllSchemas_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("east", "west"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("table", "s", resources));
+        assertEquals(3, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("soup", "sushi", "stew")));
+    }
+
+    @Test
+    public void testListTables_AllDbs_AllSchemas_FilteredPresent() throws Exception {
+        resources.put("database", Arrays.asList("*"));
+        resources.put("schema", Arrays.asList("*"));
+        List<String> result = service.lookupResource(getContext("table", "s", resources));
+        assertEquals(3, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("soup", "sushi", "stew")));
+    }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListTablespacesTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListTablespacesTest.java b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListTablespacesTest.java
new file mode 100644
index 0000000..65048db
--- /dev/null
+++ b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/ListTablespacesTest.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.admin;
+
+import com.google.common.collect.Sets;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Set;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class ListTablespacesTest extends LookupTestBase {
+
+    private static final Set<String> TABLESPACES = Sets.newHashSet("pg_default", "pg_global", "dfs_default");
+
+    @Test
+    public void testListTablespace_All() throws Exception {
+        List<String> result = service.lookupResource(getContext("tablespace", "*"));
+        assertEquals(3, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet(TABLESPACES)));
+    }
+
+    @Test
+    public void testListTablespace_FilteredPresent() throws Exception {
+        List<String> result = service.lookupResource(getContext("tablespace", "pg_d"));
+        assertEquals(1, result.size());
+        assertTrue(Sets.newHashSet(result).equals(Sets.newHashSet("pg_default")));
+    }
+
+    @Test
+    public void testListTablespace_FilteredAbsent() throws Exception {
+        List<String> result = service.lookupResource(getContext("tablespace", "z"));
+        assertTrue(result.isEmpty());
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/LookupTestBase.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/LookupTestBase.java b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/LookupTestBase.java
new file mode 100644
index 0000000..25265f3
--- /dev/null
+++ b/ranger-plugin/integration/admin/src/test/java/org/apache/hawq/ranger/integration/admin/LookupTestBase.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.admin;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hawq.ranger.service.RangerServiceHawq;
+import org.apache.ranger.plugin.service.ResourceLookupContext;
+import org.junit.Before;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public abstract class LookupTestBase {
+
+    protected static final Log LOG = LogFactory.getLog(LookupTestBase.class);
+    protected RangerServiceHawq service;
+
+    @Before
+    public void setup() {
+        Map<String, String> configs = new HashMap<>();
+        configs.put("username", "gpadmin");
+        configs.put("password", "dQSF8ViAE4/I38xmFwJfCg==");
+        configs.put("hostname", "localhost");
+        configs.put("port", "5432");
+        configs.put("jdbc.driverClassName", "org.postgresql.Driver");
+
+        service = new RangerServiceHawq();
+        service.setServiceName("hawq");
+        service.setServiceType("hawq");
+        service.setConfigs(configs);
+    }
+
+    protected ResourceLookupContext getContext(String resourceName, String userInput) {
+        ResourceLookupContext context = new ResourceLookupContext();
+        context.setResourceName(resourceName);
+        context.setUserInput(userInput);
+        return context;
+    }
+
+    protected ResourceLookupContext getContext(String resourceName, String userInput, Map<String, List<String>> resources) {
+        ResourceLookupContext context = getContext(resourceName, userInput);
+        context.setResources(resources);
+        return context;
+    }
+}
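
A minimal usage sketch (not part of this commit; the class name and resource keys are hypothetical) of how a lookup test built on LookupTestBase could exercise the three-argument getContext() overload, scoping the lookup by parent resources. The expected match assumes the east/japan fixtures defined in admin-tests-ddl.sql below.

package org.apache.hawq.ranger.integration.admin;

import org.junit.Test;

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static org.junit.Assert.assertTrue;

public class TableLookupSketch extends LookupTestBase {

    @Test
    public void testListTables_FilteredByDatabaseAndSchema() throws Exception {
        // Scope the lookup to database "east", schema "japan" (hypothetical resource keys).
        Map<String, List<String>> parents = new HashMap<>();
        parents.put("database", Arrays.asList("east"));
        parents.put("schema", Arrays.asList("japan"));
        // With the admin-tests-ddl.sql fixtures, the filter "ri" should match japan.rice.
        List<String> result = service.lookupResource(getContext("table", "ri", parents));
        assertTrue(result.contains("rice"));
    }
}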

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/admin/src/test/resources/admin-tests-ddl.sql
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/admin/src/test/resources/admin-tests-ddl.sql b/ranger-plugin/integration/admin/src/test/resources/admin-tests-ddl.sql
new file mode 100644
index 0000000..d9e7fcc
--- /dev/null
+++ b/ranger-plugin/integration/admin/src/test/resources/admin-tests-ddl.sql
@@ -0,0 +1,61 @@
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements.  See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership.  The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License.  You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied.  See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+
+-- EAST Database and its objects
+DROP DATABASE IF EXISTS east;
+CREATE DATABASE east;
+\c east;
+CREATE SCHEMA common;
+CREATE TABLE common.rice (id integer);
+CREATE TABLE common.soup (id integer);
+CREATE SEQUENCE common.water;
+CREATE SEQUENCE common.sprite;
+CREATE FUNCTION common.eat(integer) RETURNS integer AS 'select $1;' LANGUAGE SQL;
+CREATE FUNCTION common.sleep(integer) RETURNS integer AS 'select $1;' LANGUAGE SQL;
+CREATE SCHEMA japan;
+CREATE TABLE japan.rice (id integer);
+CREATE TABLE japan.sushi (id integer);
+CREATE SEQUENCE japan.water;
+CREATE SEQUENCE japan.sake;
+CREATE FUNCTION japan.eat(integer) RETURNS integer AS 'select $1;' LANGUAGE SQL;
+CREATE FUNCTION japan.stand(integer) RETURNS integer AS 'select $1;' LANGUAGE SQL;
+CREATE LANGUAGE langdbeast HANDLER plpgsql_call_handler;
+
+-- WEST Database and its objects
+DROP DATABASE IF EXISTS west;
+CREATE DATABASE west;
+\c west;
+CREATE SCHEMA common;
+CREATE TABLE common.rice (id integer);
+CREATE TABLE common.soup (id integer);
+CREATE SEQUENCE common.water;
+CREATE SEQUENCE common.sprite;
+CREATE FUNCTION common.eat(integer) RETURNS integer AS 'select $1;' LANGUAGE SQL;
+CREATE FUNCTION common.sleep(integer) RETURNS integer AS 'select $1;' LANGUAGE SQL;
+CREATE SCHEMA france;
+CREATE TABLE france.rice (id integer);
+CREATE TABLE france.stew (id integer);
+CREATE SEQUENCE france.water;
+CREATE SEQUENCE france.scotch;
+CREATE FUNCTION france.eat(integer) RETURNS integer AS 'select $1;' LANGUAGE SQL;
+CREATE FUNCTION france.smile(integer) RETURNS integer AS 'select $1;' LANGUAGE SQL;
+CREATE LANGUAGE langdbwest HANDLER plpgsql_call_handler;
+CREATE SCHEMA jamaica;
+
+-- Database without an explicit schema
+DROP DATABASE IF EXISTS noschema_db;
+CREATE DATABASE noschema_db;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/admin/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/admin/src/test/resources/log4j.properties b/ranger-plugin/integration/admin/src/test/resources/log4j.properties
new file mode 100644
index 0000000..903f0b6
--- /dev/null
+++ b/ranger-plugin/integration/admin/src/test/resources/log4j.properties
@@ -0,0 +1,34 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+##-- To prevent JUnit output from cluttering the build, all test runs send their output to the null appender by default
+log4j.appender.devnull=org.apache.log4j.varia.NullAppender
+#hawq.ranger.root.logger=FATAL,devnull
+
+##-- uncomment the following line during development/debugging to emit debug messages to the console during the test run
+hawq.ranger.root.logger=DEBUG,console
+log4j.rootLogger=${hawq.ranger.root.logger}
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/pom.xml
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/pom.xml b/ranger-plugin/integration/pom.xml
new file mode 100644
index 0000000..b6aac80
--- /dev/null
+++ b/ranger-plugin/integration/pom.xml
@@ -0,0 +1,67 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>org.apache.hawq</groupId>
+    <artifactId>ranger-plugin-integration</artifactId>
+    <packaging>pom</packaging>
+    <name>HAWQ Ranger Plugin - Integration Tests</name>
+    <description>HAWQ Ranger Plugin - Integration Tests</description>
+
+    <parent>
+        <groupId>org.apache.hawq</groupId>
+        <artifactId>ranger-plugin</artifactId>
+        <version>2.1.0.0</version>
+        <relativePath>..</relativePath>
+    </parent>
+
+    <modules>
+        <module>admin</module>
+        <module>service</module>
+    </modules>
+
+    <properties>
+        <jackson.version>1.9</jackson.version>
+    </properties>
+
+    <build>
+        <testResources>
+            <testResource>
+                <directory>src/test/resources</directory>
+                <includes>
+                    <include>**/*</include>
+                </includes>
+                <filtering>true</filtering>
+            </testResource>
+        </testResources>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <configuration>
+                    <source>1.7</source>
+                    <target>1.7</target>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/service/pom.xml
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/pom.xml b/ranger-plugin/integration/service/pom.xml
new file mode 100644
index 0000000..34ade8d
--- /dev/null
+++ b/ranger-plugin/integration/service/pom.xml
@@ -0,0 +1,68 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <groupId>org.apache.hawq</groupId>
+    <artifactId>ranger-plugin-integration-service</artifactId>
+    <packaging>jar</packaging>
+    <name>HAWQ Ranger Plugin - Integration Tests</name>
+    <description>HAWQ Ranger Plugin - Integration Tests</description>
+
+    <parent>
+        <groupId>org.apache.hawq</groupId>
+        <artifactId>ranger-plugin-integration</artifactId>
+        <version>2.1.0.0</version>
+        <relativePath>..</relativePath>
+    </parent>
+
+    <dependencies>
+        <dependency>
+            <groupId>log4j</groupId>
+            <artifactId>log4j</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>postgresql</groupId>
+            <artifactId>postgresql</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-io</artifactId>
+            <version>1.3.2</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.httpcomponents</groupId>
+            <artifactId>httpclient</artifactId>
+            <version>4.5.2</version>
+        </dependency>
+        <dependency>
+            <groupId>org.codehaus.jackson</groupId>
+            <artifactId>jackson-mapper-asl</artifactId>
+            <version>1.9.13</version>
+        </dependency>
+
+        <!-- Test Dependencies -->
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+        </dependency>
+    </dependencies>
+
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/DatabaseTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/DatabaseTest.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/DatabaseTest.java
new file mode 100644
index 0000000..451a289
--- /dev/null
+++ b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/DatabaseTest.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.service.tests;
+
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class DatabaseTest extends ServiceBaseTest {
+
+    private static final List<String> PRIVILEGES = Arrays.asList("connect", "temp");
+
+    public void beforeTest()
+            throws IOException {
+        createPolicy("test-database.json");
+        resources.put("database", "sirotan");
+    }
+
+    @Test
+    public void testDatabases_UserMaria_SirotanDb_Allowed()
+            throws IOException {
+        assertTrue(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    }
+
+    @Test
+    public void testDatabases_UserMaria_DoesNotExistDb_Denied()
+            throws IOException {
+        resources.put("database", "doesnotexist");
+        assertFalse(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    }
+
+    @Test
+    public void testDatabases_UserBob_SirotanDb_Denied()
+            throws IOException {
+        assertFalse(hasAccess("bob", resources, PRIVILEGES));
+    }
+
+    @Test
+    public void testDatabases_UserMaria_SirotanDb_Denied()
+            throws IOException {
+        deletePolicy();
+        assertFalse(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/FunctionTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/FunctionTest.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/FunctionTest.java
new file mode 100644
index 0000000..1253c38
--- /dev/null
+++ b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/FunctionTest.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.service.tests;
+
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class FunctionTest extends ServiceBaseTest {
+
+    private static final List<String> PRIVILEGES = Arrays.asList("execute");
+
+    public void beforeTest()
+            throws IOException {
+        createPolicy("test-function.json");
+        resources.put("database", "sirotan");
+        resources.put("schema", "siroschema");
+        resources.put("function", "atan");
+    }
+
+    @Test
+    public void testFunctions_UserMaria_SirotanDb_AtanFunction_Allowed()
+            throws IOException {
+        assertTrue(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    }
+
+    @Test
+    public void testFunctions_UserMaria_OtherDb_AtanFunction_Denied()
+            throws IOException {
+        resources.put("database", "other");
+        assertFalse(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    }
+
+    @Test
+    public void testFunctions_UserMaria_SirotanDb_DoesNotExistFunction_Denied()
+            throws IOException {
+        resources.put("function", "doesnotexist");
+        assertFalse(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    }
+
+    @Test
+    public void testFunctions_UserBob_SirotanDb_AtanFunction_Denied()
+            throws IOException {
+        assertFalse(hasAccess("bob", resources, PRIVILEGES));
+    }
+
+    @Test
+    public void testFunctions_UserMaria_SirotanDb_AtanFunction_Denied()
+            throws IOException {
+        deletePolicy();
+        assertFalse(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    }
+
+    @Test
+    public void testFunctions_UserMaria_DoesNotExistDb_AtanFunction_Denied()
+            throws IOException {
+        resources.put("database", "doesnotexist");
+        assertFalse(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    }
+
+    @Test
+    public void testFunctions_UserMaria_SirotanDb_AtanFunction_Policy2_Allowed()
+            throws IOException {
+        deletePolicy();
+        createPolicy("test-function-2.json");
+        assertTrue(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/LanguageTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/LanguageTest.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/LanguageTest.java
new file mode 100644
index 0000000..6eedb08
--- /dev/null
+++ b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/LanguageTest.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.service.tests;
+
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class LanguageTest extends ServiceBaseTest {
+
+    private static final List<String> PRIVILEGES = Arrays.asList("usage");
+
+    public void beforeTest()
+            throws IOException {
+        createPolicy("test-language.json");
+        resources.put("database", "sirotan");
+        resources.put("language", "sql");
+    }
+
+    @Test
+    public void testLanguages_UserMaria_SirotanDb_SqlLanguage_Allowed()
+            throws IOException {
+        assertTrue(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    }
+
+    @Test
+    public void testLanguages_UserMaria_SirotanDb_DoesNotExistLanguage_Denied()
+            throws IOException {
+        resources.put("language", "doesnotexist");
+        assertFalse(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    }
+
+    @Test
+    public void testLanguages_UserBob_SirotanDb_SqlLanguage_Denied()
+            throws IOException {
+        assertFalse(hasAccess("bob", resources, PRIVILEGES));
+    }
+
+    @Test
+    public void testLanguages_UserMaria_SirotanDb_SqlLanguage_Denied()
+            throws IOException {
+        deletePolicy();
+        assertFalse(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    }
+
+    @Test
+    public void testLanguages_UserMaria_DoesNotExistDb_SqlLanguage_Denied()
+            throws IOException {
+        resources.put("database", "doesnotexist");
+        assertFalse(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    }
+
+    @Test
+    public void testLanguages_UserMaria_SirotanDb_SqlLanguage_Policy2_Allowed()
+            throws IOException {
+        deletePolicy();
+        createPolicy("test-language-2.json");
+        assertTrue(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/ProtocolTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/ProtocolTest.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/ProtocolTest.java
new file mode 100644
index 0000000..f0e5c99
--- /dev/null
+++ b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/ProtocolTest.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.service.tests;
+
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class ProtocolTest extends ServiceBaseTest {
+
+    private static final List<String> PRIVILEGES = Arrays.asList("select", "insert");
+
+    public void beforeTest()
+            throws IOException {
+        createPolicy("test-protocol.json");
+        resources.put("protocol", "pxf");
+    }
+
+    @Test
+    public void testProtocols_UserMaria_PxfProtocol_Allowed()
+            throws IOException {
+        assertTrue(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    }
+
+    @Test
+    public void testProtocols_UserMaria_DoesNotExistProtocol_Denied()
+            throws IOException {
+        resources.put("protocol", "doesnotexist");
+        assertFalse(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    }
+
+    @Test
+    public void testProtocols_UserBob_PxfProtocol_Denied()
+            throws IOException {
+        assertFalse(hasAccess("bob", resources, PRIVILEGES));
+    }
+
+    @Test
+    public void testProtocols_UserMaria_PxfProtocol_Denied()
+            throws IOException {
+        deletePolicy();
+        assertFalse(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/RPSRequest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/RPSRequest.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/RPSRequest.java
new file mode 100644
index 0000000..7e7787a
--- /dev/null
+++ b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/RPSRequest.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.service.tests;
+
+import org.codehaus.jackson.map.ObjectMapper;
+
+import java.io.IOException;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class RPSRequest {
+
+    String user;
+    Map<String, String> resources;
+    List<String> privileges;
+
+    public RPSRequest(String user,
+                      Map<String, String> resources,
+                      List<String> privileges) {
+        this.user = user;
+        this.resources = resources;
+        this.privileges = privileges;
+    }
+
+    public String getJsonString()
+            throws IOException {
+
+        Map<String, Object> request = new HashMap<>();
+        request.put("requestId", 9);
+        request.put("user", user);
+        request.put("clientIp", "123.0.0.21");
+        request.put("context", "CREATE DATABASE sirotan;");
+        Map<String, Object> accessHash = new HashMap<>();
+        accessHash.put("resource", resources);
+        accessHash.put("privileges", privileges);
+        request.put("access", Arrays.asList(accessHash));
+        return new ObjectMapper().writeValueAsString(request);
+    }
+
+}
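
For illustration only (not part of the commit), a small sketch of the JSON body RPSRequest.getJsonString() would produce for a typical database access check; field order in the serialized output is not guaranteed, so the comment shows only the approximate shape.

package org.apache.hawq.ranger.integration.service.tests;

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

public class RPSRequestSketch {

    public static void main(String[] args) throws Exception {
        Map<String, String> resources = new HashMap<>();
        resources.put("database", "sirotan");

        RPSRequest request = new RPSRequest("maria_dev", resources,
                Arrays.asList("connect", "temp"));

        // Prints JSON roughly of the form:
        // {"requestId":9,"user":"maria_dev","clientIp":"123.0.0.21",
        //  "context":"CREATE DATABASE sirotan;",
        //  "access":[{"resource":{"database":"sirotan"},"privileges":["connect","temp"]}]}
        System.out.println(request.getJsonString());
    }
}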

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/RPSResponse.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/RPSResponse.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/RPSResponse.java
new file mode 100644
index 0000000..2ed1046
--- /dev/null
+++ b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/RPSResponse.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.service.tests;
+
+import org.codehaus.jackson.annotate.JsonProperty;
+
+import java.util.List;
+import java.util.Map;
+
+public class RPSResponse {
+
+    @JsonProperty
+    public int requestId;
+
+    @JsonProperty
+    public List<Map<String, Object>> access;
+
+    public List<Map<String, Object>> getAccess() {
+        return access;
+    }
+
+    public boolean hasAccess() {
+        return (boolean) access.get(0).get("allowed");
+    }
+}
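
A hypothetical round-trip sketch (not in the commit): deserializing a raw RPS response body into RPSResponse and reading the access decision, mirroring what Utils.getResponse() does further down. The JSON literal is an assumed example of the service's response format.

package org.apache.hawq.ranger.integration.service.tests;

import org.codehaus.jackson.map.ObjectMapper;

public class RPSResponseSketch {

    public static void main(String[] args) throws Exception {
        // Assumed example response body; the real service may include additional fields.
        String body = "{\"requestId\":9,\"access\":[{\"resource\":{\"database\":\"sirotan\"},"
                + "\"privileges\":[\"connect\"],\"allowed\":true}]}";
        RPSResponse response = new ObjectMapper().readValue(body, RPSResponse.class);
        System.out.println("allowed = " + response.hasAccess());  // prints: allowed = true
    }
}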

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/ServiceBaseTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/ServiceBaseTest.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/ServiceBaseTest.java
new file mode 100644
index 0000000..8608584
--- /dev/null
+++ b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/ServiceBaseTest.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.service.tests;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.http.client.methods.HttpDelete;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.entity.StringEntity;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.rules.TestName;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public abstract class ServiceBaseTest {
+
+    protected final Log log = LogFactory.getLog(this.getClass());
+
+    @Rule
+    public final TestName testName = new TestName();
+    protected final String policyName = getClass().getSimpleName();
+    protected Map<String, String> resources = new HashMap<>();
+
+    public static String RANGER_PLUGIN_SERVICE_HOST = "localhost";
+    public static String RANGER_PLUGIN_SERVICE_PORT = "8432";
+    public static String RANGER_PLUGIN_SERVICE_URL =
+        "http://" + RANGER_PLUGIN_SERVICE_HOST + ":" + RANGER_PLUGIN_SERVICE_PORT + "/rps";
+    public static String RANGER_ADMIN_HOST = "localhost";
+    public static String RANGER_ADMIN_PORT = "6080";
+    public static String RANGER_URL =
+        "http://" + RANGER_ADMIN_HOST + ":" + RANGER_ADMIN_PORT + "/service/public/v2/api";
+    public static String RANGER_TEST_USER = "maria_dev";
+    public static int    POLICY_REFRESH_INTERVAL = 6000;
+
+    @Before
+    public void setUp()
+            throws IOException {
+        log.info("======================================================================================");
+        log.info("Running test " + testName.getMethodName());
+        log.info("======================================================================================");
+        beforeTest();
+    }
+
+    @After
+    public void tearDown()
+            throws IOException {
+        deletePolicy();
+    }
+
+    protected void createPolicy(String jsonFile)
+            throws IOException {
+
+        log.info("Creating policy " + policyName);
+        HttpPost httpPost = new HttpPost(RANGER_URL + "/policy");
+        httpPost.setEntity(new StringEntity(Utils.getPayload(jsonFile)));
+        Utils.processHttpRequest(httpPost);
+        waitForPolicyRefresh();
+    }
+
+    protected void deletePolicy()
+            throws IOException {
+
+        log.info("Deleting policy " + policyName);
+        String requestUrl = RANGER_URL + "/policy?servicename=hawq&policyname=" + policyName;
+        Utils.processHttpRequest(new HttpDelete(requestUrl));
+        waitForPolicyRefresh();
+    }
+
+    protected boolean hasAccess(String user,
+                                Map<String, String> resources,
+                                List<String> privileges)
+            throws IOException {
+
+        log.info("Checking access for user " + user);
+        RPSRequest request = new RPSRequest(user, resources, privileges);
+        HttpPost httpPost = new HttpPost(RANGER_PLUGIN_SERVICE_URL);
+        httpPost.setEntity(new StringEntity(request.getJsonString()));
+        String result = Utils.processHttpRequest(httpPost);
+        RPSResponse rpsResponse = Utils.getResponse(result);
+        return rpsResponse.hasAccess();
+    }
+
+    private void waitForPolicyRefresh() {
+
+        try {
+            Thread.sleep(POLICY_REFRESH_INTERVAL);
+        }
+        catch (InterruptedException e) {
+            log.error(e);
+        }
+    }
+
+    public abstract void beforeTest() throws IOException;
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/TablespaceTest.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/TablespaceTest.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/TablespaceTest.java
new file mode 100644
index 0000000..cfc41cb
--- /dev/null
+++ b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/TablespaceTest.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.service.tests;
+
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class TablespaceTest extends ServiceBaseTest {
+
+    private static final List<String> PRIVILEGES = Arrays.asList("create");
+
+    public void beforeTest()
+            throws IOException {
+        createPolicy("test-tablespace.json");
+        resources.put("tablespace", "pg_global");
+    }
+
+    @Test
+    public void testTablespaces_UserMaria_PgGlobalTablespace_Allowed()
+            throws IOException {
+        assertTrue(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    }
+
+    @Test
+    public void testTablespaces_UserMaria_DoesNotExistTablespace_Denied()
+            throws IOException {
+        resources.put("tablespace", "doesnotexist");
+        assertFalse(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    }
+
+    @Test
+    public void testTablespaces_UserBob_PgGlobalTablespace_Denied()
+            throws IOException {
+        assertFalse(hasAccess("bob", resources, PRIVILEGES));
+    }
+
+    @Test
+    public void testTablespaces_UserMaria_PgGlobalTablespace_Denied()
+            throws IOException {
+        deletePolicy();
+        assertFalse(hasAccess(RANGER_TEST_USER, resources, PRIVILEGES));
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/Utils.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/Utils.java b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/Utils.java
new file mode 100644
index 0000000..971e513
--- /dev/null
+++ b/ranger-plugin/integration/service/src/test/java/org/apache/hawq/ranger/integration/service/tests/Utils.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.ranger.integration.service.tests;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.HttpClient;
+import org.apache.http.client.methods.HttpRequestBase;
+import org.apache.http.impl.client.HttpClientBuilder;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import java.io.IOException;
+
+public class Utils {
+
+    protected static final Log log = LogFactory.getLog(Utils.class);
+
+    public static String getPayload(String jsonFile)
+            throws IOException {
+        return IOUtils.toString(Utils.class.getClassLoader().getResourceAsStream(jsonFile));
+    }
+
+    public static String getEncoding() {
+        return Base64.encodeBase64String("admin:admin".getBytes());
+    }
+
+    public static String processHttpRequest(HttpRequestBase request)
+            throws IOException {
+
+        if (log.isDebugEnabled()) {
+            log.debug("Request URI = " + request.getURI().toString());
+        }
+        request.setHeader("Authorization", "Basic " + getEncoding());
+        request.setHeader("Content-Type", "application/json");
+        HttpClient httpClient = HttpClientBuilder.create().build();
+        HttpResponse response = httpClient.execute(request);
+        int responseCode = response.getStatusLine().getStatusCode();
+        log.info("Response Code = " + responseCode);
+        HttpEntity entity = response.getEntity();
+        if (entity != null) {
+            String result = IOUtils.toString(entity.getContent());
+            if (log.isDebugEnabled()) {
+                log.debug(result);
+            }
+            return result;
+        }
+        return null;
+    }
+
+    public static RPSResponse getResponse(String result)
+            throws IOException {
+        return new ObjectMapper().readValue(result, RPSResponse.class);
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/service/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/resources/log4j.properties b/ranger-plugin/integration/service/src/test/resources/log4j.properties
new file mode 100644
index 0000000..8578fd2
--- /dev/null
+++ b/ranger-plugin/integration/service/src/test/resources/log4j.properties
@@ -0,0 +1,35 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+##-- To prevent JUnit output from cluttering the build, all test runs send their output to the null appender by default
+log4j.appender.devnull=org.apache.log4j.varia.NullAppender
+#hawq.ranger.root.logger=FATAL,devnull
+
+##-- uncomment the following line during development/debugging to emit debug messages to the console during the test run
+hawq.ranger.root.logger=DEBUG,console
+log4j.rootLogger=${hawq.ranger.root.logger}
+log4j.logger.org.apache.http=WARN
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/service/src/test/resources/test-database.json
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/resources/test-database.json b/ranger-plugin/integration/service/src/test/resources/test-database.json
new file mode 100644
index 0000000..ffa3bfe
--- /dev/null
+++ b/ranger-plugin/integration/service/src/test/resources/test-database.json
@@ -0,0 +1,46 @@
+{
+  "isEnabled": true,
+  "service": "hawq",
+  "name": "DatabaseTest",
+  "policyType": 0,
+  "description": "Test policy for database resource",
+  "isAuditEnabled": true,
+  "resources": {
+    "schema": {
+      "values": ["*"],
+      "isExcludes": false,
+      "isRecursive": false
+    },
+    "database": {
+      "values": ["sirotan"],
+      "isExcludes": false,
+      "isRecursive": false
+    },
+    "function": {
+      "values": ["*"],
+      "isExcludes": false,
+      "isRecursive": false
+    }
+  },
+  "policyItems": [{
+    "accesses": [{
+      "type": "create",
+      "isAllowed": true
+    }, {
+      "type": "connect",
+      "isAllowed": true
+    }, {
+      "type": "temp",
+      "isAllowed": true
+    }],
+    "users": ["maria_dev"],
+    "groups": [],
+    "conditions": [],
+    "delegateAdmin": true
+  }],
+  "denyPolicyItems": [],
+  "allowExceptions": [],
+  "denyExceptions": [],
+  "dataMaskPolicyItems": [],
+  "rowFilterPolicyItems": []
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/service/src/test/resources/test-function-2.json
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/resources/test-function-2.json b/ranger-plugin/integration/service/src/test/resources/test-function-2.json
new file mode 100644
index 0000000..5ae7f0b
--- /dev/null
+++ b/ranger-plugin/integration/service/src/test/resources/test-function-2.json
@@ -0,0 +1,40 @@
+{
+  "isEnabled": true,
+  "service": "hawq",
+  "name": "FunctionTest",
+  "policyType": 0,
+  "description": "Test policy for function resource",
+  "isAuditEnabled": true,
+  "resources": {
+    "schema": {
+      "values": ["*"],
+      "isExcludes": false,
+      "isRecursive": false
+    },
+    "database": {
+      "values": ["*"],
+      "isExcludes": false,
+      "isRecursive": false
+    },
+    "function": {
+      "values": ["atan"],
+      "isExcludes": false,
+      "isRecursive": false
+    }
+  },
+  "policyItems": [{
+    "accesses": [{
+      "type": "execute",
+      "isAllowed": true
+    }],
+    "users": ["maria_dev"],
+    "groups": [],
+    "conditions": [],
+    "delegateAdmin": true
+  }],
+  "denyPolicyItems": [],
+  "allowExceptions": [],
+  "denyExceptions": [],
+  "dataMaskPolicyItems": [],
+  "rowFilterPolicyItems": []
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/service/src/test/resources/test-function.json
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/resources/test-function.json b/ranger-plugin/integration/service/src/test/resources/test-function.json
new file mode 100644
index 0000000..74d5d83
--- /dev/null
+++ b/ranger-plugin/integration/service/src/test/resources/test-function.json
@@ -0,0 +1,40 @@
+{
+  "isEnabled": true,
+  "service": "hawq",
+  "name": "FunctionTest",
+  "policyType": 0,
+  "description": "Test policy for function resource",
+  "isAuditEnabled": true,
+  "resources": {
+    "schema": {
+      "values": ["siroschema"],
+      "isExcludes": false,
+      "isRecursive": false
+    },
+    "database": {
+      "values": ["sirotan"],
+      "isExcludes": false,
+      "isRecursive": false
+    },
+    "function": {
+      "values": ["atan"],
+      "isExcludes": false,
+      "isRecursive": false
+    }
+  },
+  "policyItems": [{
+    "accesses": [{
+      "type": "execute",
+      "isAllowed": true
+    }],
+    "users": ["maria_dev"],
+    "groups": [],
+    "conditions": [],
+    "delegateAdmin": true
+  }],
+  "denyPolicyItems": [],
+  "allowExceptions": [],
+  "denyExceptions": [],
+  "dataMaskPolicyItems": [],
+  "rowFilterPolicyItems": []
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/service/src/test/resources/test-language-2.json
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/resources/test-language-2.json b/ranger-plugin/integration/service/src/test/resources/test-language-2.json
new file mode 100644
index 0000000..93a41fe
--- /dev/null
+++ b/ranger-plugin/integration/service/src/test/resources/test-language-2.json
@@ -0,0 +1,35 @@
+{
+  "isEnabled": true,
+  "service": "hawq",
+  "name": "LanguageTest",
+  "policyType": 0,
+  "description": "Test policy for language resource",
+  "isAuditEnabled": true,
+  "resources": {
+    "language": {
+      "values": ["sql"],
+      "isExcludes": false,
+      "isRecursive": false
+    },
+    "database": {
+      "values": ["*"],
+      "isExcludes": false,
+      "isRecursive": false
+    }
+  },
+  "policyItems": [{
+    "accesses": [{
+      "type": "usage",
+      "isAllowed": true
+    }],
+    "users": ["maria_dev"],
+    "groups": [],
+    "conditions": [],
+    "delegateAdmin": true
+  }],
+  "denyPolicyItems": [],
+  "allowExceptions": [],
+  "denyExceptions": [],
+  "dataMaskPolicyItems": [],
+  "rowFilterPolicyItems": []
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/service/src/test/resources/test-language.json
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/resources/test-language.json b/ranger-plugin/integration/service/src/test/resources/test-language.json
new file mode 100644
index 0000000..cba2f43
--- /dev/null
+++ b/ranger-plugin/integration/service/src/test/resources/test-language.json
@@ -0,0 +1,35 @@
+{
+  "isEnabled": true,
+  "service": "hawq",
+  "name": "LanguageTest",
+  "policyType": 0,
+  "description": "Test policy for language resource",
+  "isAuditEnabled": true,
+  "resources": {
+    "language": {
+      "values": ["sql"],
+      "isExcludes": false,
+      "isRecursive": false
+    },
+    "database": {
+      "values": ["sirotan"],
+      "isExcludes": false,
+      "isRecursive": false
+    }
+  },
+  "policyItems": [{
+    "accesses": [{
+      "type": "usage",
+      "isAllowed": true
+    }],
+    "users": ["maria_dev"],
+    "groups": [],
+    "conditions": [],
+    "delegateAdmin": true
+  }],
+  "denyPolicyItems": [],
+  "allowExceptions": [],
+  "denyExceptions": [],
+  "dataMaskPolicyItems": [],
+  "rowFilterPolicyItems": []
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/service/src/test/resources/test-protocol.json
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/resources/test-protocol.json b/ranger-plugin/integration/service/src/test/resources/test-protocol.json
new file mode 100644
index 0000000..d59caed
--- /dev/null
+++ b/ranger-plugin/integration/service/src/test/resources/test-protocol.json
@@ -0,0 +1,33 @@
+{
+  "isEnabled": true,
+  "service": "hawq",
+  "name": "ProtocolTest",
+  "policyType": 0,
+  "description": "Test policy for protocol resource",
+  "isAuditEnabled": true,
+  "resources": {
+    "protocol": {
+      "values": ["pxf"],
+      "isExcludes": false,
+      "isRecursive": false
+    }
+  },
+  "policyItems": [{
+    "accesses": [{
+      "type": "select",
+      "isAllowed": true
+    }, {
+      "type": "insert",
+      "isAllowed": true
+    }],
+    "users": ["maria_dev"],
+    "groups": [],
+    "conditions": [],
+    "delegateAdmin": true
+  }],
+  "denyPolicyItems": [],
+  "allowExceptions": [],
+  "denyExceptions": [],
+  "dataMaskPolicyItems": [],
+  "rowFilterPolicyItems": []
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7f36b35b/ranger-plugin/integration/service/src/test/resources/test-tablespace.json
----------------------------------------------------------------------
diff --git a/ranger-plugin/integration/service/src/test/resources/test-tablespace.json b/ranger-plugin/integration/service/src/test/resources/test-tablespace.json
new file mode 100644
index 0000000..a45ecea
--- /dev/null
+++ b/ranger-plugin/integration/service/src/test/resources/test-tablespace.json
@@ -0,0 +1,30 @@
+{
+  "isEnabled": true,
+  "service": "hawq",
+  "name": "TablespaceTest",
+  "policyType": 0,
+  "description": "Test policy for tablespace resource",
+  "isAuditEnabled": true,
+  "resources": {
+    "tablespace": {
+      "values": ["pg_global"],
+      "isExcludes": false,
+      "isRecursive": false
+    }
+  },
+  "policyItems": [{
+    "accesses": [{
+      "type": "create",
+      "isAllowed": true
+    }],
+    "users": ["maria_dev"],
+    "groups": [],
+    "conditions": [],
+    "delegateAdmin": true
+  }],
+  "denyPolicyItems": [],
+  "allowExceptions": [],
+  "denyExceptions": [],
+  "dataMaskPolicyItems": [],
+  "rowFilterPolicyItems": []
+}
\ No newline at end of file



[21/50] [abbrv] incubator-hawq git commit: Revert "HAWQ-1248. Merge Dockerfiles for HAWQ Dev into HAWQ code base."

Posted by es...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-test/conf/log4j.properties
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/log4j.properties b/contrib/hawq-docker/centos6-docker/hawq-test/conf/log4j.properties
deleted file mode 100644
index c901ab1..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-test/conf/log4j.properties
+++ /dev/null
@@ -1,291 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshold=ALL
-
-# Null Appender
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Rolling File Appender - cap space usage at 5gb.
-#
-hadoop.log.maxfilesize=256MB
-hadoop.log.maxbackupindex=20
-log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
-log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
-
-log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-# HDFS block state change log from block manager
-#
-# Uncomment the following to suppress normal block state change
-# messages from BlockManager in NameNode.
-#log4j.logger.BlockStateChange=WARN
-
-#
-#Security appender
-#
-hadoop.security.logger=INFO,NullAppender
-hadoop.security.log.maxfilesize=256MB
-hadoop.security.log.maxbackupindex=20
-log4j.category.SecurityLogger=${hadoop.security.logger}
-hadoop.security.log.file=SecurityAuth-${user.name}.audit
-log4j.appender.RFAS=org.apache.log4j.RollingFileAppender 
-log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
-log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
-
-#
-# Daily Rolling Security appender
-#
-log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender 
-log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
-
-#
-# hadoop configuration logging
-#
-
-# Uncomment the following line to turn off configuration deprecation warnings.
-# log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
-
-#
-# hdfs audit logging
-#
-hdfs.audit.logger=INFO,NullAppender
-hdfs.audit.log.maxfilesize=256MB
-hdfs.audit.log.maxbackupindex=20
-log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
-log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
-log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
-log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
-log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
-log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
-
-#
-# NameNode metrics logging.
-# The default is to retain two namenode-metrics.log files up to 64MB each.
-#
-namenode.metrics.logger=INFO,NullAppender
-log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}
-log4j.additivity.NameNodeMetricsLog=false
-log4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender
-log4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log
-log4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout
-log4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
-log4j.appender.NNMETRICSRFA.MaxBackupIndex=1
-log4j.appender.NNMETRICSRFA.MaxFileSize=64MB
-
-#
-# mapred audit logging
-#
-mapred.audit.logger=INFO,NullAppender
-mapred.audit.log.maxfilesize=256MB
-mapred.audit.log.maxbackupindex=20
-log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
-log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
-log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender
-log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
-log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}
-log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}
-
-# Custom Logging levels
-
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=DEBUG
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-# AWS SDK & S3A FileSystem
-log4j.logger.com.amazonaws=ERROR
-log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
-log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-
-#
-# Job Summary Appender 
-#
-# Use following logger to send summary to separate file defined by 
-# hadoop.mapreduce.jobsummary.log.file :
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-# 
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-hadoop.mapreduce.jobsummary.log.maxfilesize=256MB
-hadoop.mapreduce.jobsummary.log.maxbackupindex=20
-log4j.appender.JSA=org.apache.log4j.RollingFileAppender
-log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
-log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize}
-log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex}
-log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
-log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
-
-#
-# Yarn ResourceManager Application Summary Log 
-#
-# Set the ResourceManager summary log filename
-yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
-# Set the ResourceManager summary log level and appender
-yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
-#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
-
-# To enable AppSummaryLogging for the RM, 
-# set yarn.server.resourcemanager.appsummary.logger to 
-# <LEVEL>,RMSUMMARY in hadoop-env.sh
-
-# Appender for ResourceManager Application Summary Log
-# Requires the following properties to be set
-#    - hadoop.log.dir (Hadoop Log directory)
-#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
-#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
-
-log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
-log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
-log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
-log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
-log4j.appender.RMSUMMARY.MaxFileSize=256MB
-log4j.appender.RMSUMMARY.MaxBackupIndex=20
-log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
-log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-
-# HS audit log configs
-#mapreduce.hs.audit.logger=INFO,HSAUDIT
-#log4j.logger.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=${mapreduce.hs.audit.logger}
-#log4j.additivity.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=false
-#log4j.appender.HSAUDIT=org.apache.log4j.DailyRollingFileAppender
-#log4j.appender.HSAUDIT.File=${hadoop.log.dir}/hs-audit.log
-#log4j.appender.HSAUDIT.layout=org.apache.log4j.PatternLayout
-#log4j.appender.HSAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-#log4j.appender.HSAUDIT.DatePattern=.yyyy-MM-dd
-
-# Http Server Request Logs
-#log4j.logger.http.requests.namenode=INFO,namenoderequestlog
-#log4j.appender.namenoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
-#log4j.appender.namenoderequestlog.Filename=${hadoop.log.dir}/jetty-namenode-yyyy_mm_dd.log
-#log4j.appender.namenoderequestlog.RetainDays=3
-
-#log4j.logger.http.requests.datanode=INFO,datanoderequestlog
-#log4j.appender.datanoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
-#log4j.appender.datanoderequestlog.Filename=${hadoop.log.dir}/jetty-datanode-yyyy_mm_dd.log
-#log4j.appender.datanoderequestlog.RetainDays=3
-
-#log4j.logger.http.requests.resourcemanager=INFO,resourcemanagerrequestlog
-#log4j.appender.resourcemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
-#log4j.appender.resourcemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-resourcemanager-yyyy_mm_dd.log
-#log4j.appender.resourcemanagerrequestlog.RetainDays=3
-
-#log4j.logger.http.requests.jobhistory=INFO,jobhistoryrequestlog
-#log4j.appender.jobhistoryrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
-#log4j.appender.jobhistoryrequestlog.Filename=${hadoop.log.dir}/jetty-jobhistory-yyyy_mm_dd.log
-#log4j.appender.jobhistoryrequestlog.RetainDays=3
-
-#log4j.logger.http.requests.nodemanager=INFO,nodemanagerrequestlog
-#log4j.appender.nodemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
-#log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-yyyy_mm_dd.log
-#log4j.appender.nodemanagerrequestlog.RetainDays=3
-
-# Appender for viewing information for errors and warnings
-yarn.ewma.cleanupInterval=300
-yarn.ewma.messageAgeLimitSeconds=86400
-yarn.ewma.maxUniqueMessages=250
-log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
-log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
-log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
-log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}
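
The defaults in the deleted log4j.properties hinge on hadoop.root.logger (INFO,console). For an ad-hoc override the stock Hadoop launch scripts also honor the HADOOP_ROOT_LOGGER environment variable, so a rough bash sketch of bumping the level for a single client command looks like the following (the DEBUG level and the example command are illustrative only):

    # sketch: raise client-side logging for one command, then fall back to the defaults
    export HADOOP_ROOT_LOGGER=DEBUG,console   # overrides hadoop.root.logger from log4j.properties
    hdfs dfs -ls /
    unset HADOOP_ROOT_LOGGER                  # subsequent commands use INFO,console again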

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-env.cmd
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-env.cmd b/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-env.cmd
deleted file mode 100644
index 0d39526..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-env.cmd
+++ /dev/null
@@ -1,20 +0,0 @@
-@echo off
-@rem Licensed to the Apache Software Foundation (ASF) under one or more
-@rem contributor license agreements.  See the NOTICE file distributed with
-@rem this work for additional information regarding copyright ownership.
-@rem The ASF licenses this file to You under the Apache License, Version 2.0
-@rem (the "License"); you may not use this file except in compliance with
-@rem the License.  You may obtain a copy of the License at
-@rem
-@rem     http://www.apache.org/licenses/LICENSE-2.0
-@rem
-@rem Unless required by applicable law or agreed to in writing, software
-@rem distributed under the License is distributed on an "AS IS" BASIS,
-@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-@rem See the License for the specific language governing permissions and
-@rem limitations under the License.
-
-set HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000
-
-set HADOOP_MAPRED_ROOT_LOGGER=%HADOOP_LOGLEVEL%,RFA
-

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-env.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-env.sh b/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-env.sh
deleted file mode 100644
index 6be1e27..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-env.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
-
-export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000
-
-export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
-
-#export HADOOP_JOB_HISTORYSERVER_OPTS=
-#export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.
-#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.
-#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.
-#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default
-#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-queues.xml.template
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-queues.xml.template b/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-queues.xml.template
deleted file mode 100644
index ce6cd20..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-queues.xml.template
+++ /dev/null
@@ -1,92 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<!-- This is the template for queue configuration. The format supports nesting of
-     queues within queues - a feature called hierarchical queues. All queues are
-     defined within the 'queues' tag which is the top level element for this
-     XML document. The queue acls configured here for different queues are
-     checked for authorization only if the configuration property
-     mapreduce.cluster.acls.enabled is set to true. -->
-<queues>
-
-  <!-- Configuration for a queue is specified by defining a 'queue' element. -->
-  <queue>
-
-    <!-- Name of a queue. Queue name cannot contain a ':'  -->
-    <name>default</name>
-
-    <!-- properties for a queue, typically used by schedulers,
-    can be defined here -->
-    <properties>
-    </properties>
-
-	<!-- State of the queue. If running, the queue will accept new jobs.
-         If stopped, the queue will not accept new jobs. -->
-    <state>running</state>
-
-    <!-- Specifies the ACLs to check for submitting jobs to this queue.
-         If set to '*', it allows all users to submit jobs to the queue.
-         If set to ' '(i.e. space), no user will be allowed to do this
-         operation. The default value for any queue acl is ' '.
-         For specifying a list of users and groups the format to use is
-         user1,user2 group1,group2
-
-         It is only used if authorization is enabled in Map/Reduce by setting
-         the configuration property mapreduce.cluster.acls.enabled to true.
-
-         Irrespective of this ACL configuration, the user who started the
-         cluster and cluster administrators configured via
-         mapreduce.cluster.administrators can do this operation. -->
-    <acl-submit-job> </acl-submit-job>
-
-    <!-- Specifies the ACLs to check for viewing and modifying jobs in this
-         queue. Modifications include killing jobs, tasks of jobs or changing
-         priorities.
-         If set to '*', it allows all users to view, modify jobs of the queue.
-         If set to ' '(i.e. space), no user will be allowed to do this
-         operation.
-         For specifying a list of users and groups the format to use is
-         user1,user2 group1,group2
-
-         It is only used if authorization is enabled in Map/Reduce by setting
-         the configuration property mapreduce.cluster.acls.enabled to true.
-
-         Irrespective of this ACL configuration, the user who started the
-         cluster  and cluster administrators configured via
-         mapreduce.cluster.administrators can do the above operations on all
-         the jobs in all the queues. The job owner can do all the above
-         operations on his/her job irrespective of this ACL configuration. -->
-    <acl-administer-jobs> </acl-administer-jobs>
-  </queue>
-
-  <!-- Here is a sample of a hierarchical queue configuration
-       where q2 is a child of q1. In this example, q2 is a leaf level
-       queue as it has no queues configured within it. Currently, ACLs
-       and state are only supported for the leaf level queues.
-       Note also the usage of properties for the queue q2.
-  <queue>
-    <name>q1</name>
-    <queue>
-      <name>q2</name>
-      <properties>
-        <property key="capacity" value="20"/>
-        <property key="user-limit" value="30"/>
-      </properties>
-    </queue>
-  </queue>
- -->
-</queues>
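
The ACLs in the deleted queue template only take effect once mapreduce.cluster.acls.enabled is true; a quick way to check what a given user actually ends up with is the queue CLI. A hedged bash sketch (the queues and permissions shown depend entirely on the running cluster):

    # sketch: inspect the configured queues and the ACLs the current user holds
    mapred queue -list        # queue names, state and scheduling information
    mapred queue -showacls    # submit/administer permissions for the current user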

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-site.xml.template
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-site.xml.template b/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-site.xml.template
deleted file mode 100644
index 761c352..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-test/conf/mapred-site.xml.template
+++ /dev/null
@@ -1,21 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-test/conf/slaves
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/slaves b/contrib/hawq-docker/centos6-docker/hawq-test/conf/slaves
deleted file mode 100644
index 2fbb50c..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-test/conf/slaves
+++ /dev/null
@@ -1 +0,0 @@
-localhost

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-test/conf/ssl-client.xml.example
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/ssl-client.xml.example b/contrib/hawq-docker/centos6-docker/hawq-test/conf/ssl-client.xml.example
deleted file mode 100644
index a50dce4..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-test/conf/ssl-client.xml.example
+++ /dev/null
@@ -1,80 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-
-<property>
-  <name>ssl.client.truststore.location</name>
-  <value></value>
-  <description>Truststore to be used by clients like distcp. Must be
-  specified.
-  </description>
-</property>
-
-<property>
-  <name>ssl.client.truststore.password</name>
-  <value></value>
-  <description>Optional. Default value is "".
-  </description>
-</property>
-
-<property>
-  <name>ssl.client.truststore.type</name>
-  <value>jks</value>
-  <description>Optional. The keystore file format, default value is "jks".
-  </description>
-</property>
-
-<property>
-  <name>ssl.client.truststore.reload.interval</name>
-  <value>10000</value>
-  <description>Truststore reload check interval, in milliseconds.
-  Default value is 10000 (10 seconds).
-  </description>
-</property>
-
-<property>
-  <name>ssl.client.keystore.location</name>
-  <value></value>
-  <description>Keystore to be used by clients like distcp. Must be
-  specified.
-  </description>
-</property>
-
-<property>
-  <name>ssl.client.keystore.password</name>
-  <value></value>
-  <description>Optional. Default value is "".
-  </description>
-</property>
-
-<property>
-  <name>ssl.client.keystore.keypassword</name>
-  <value></value>
-  <description>Optional. Default value is "".
-  </description>
-</property>
-
-<property>
-  <name>ssl.client.keystore.type</name>
-  <value>jks</value>
-  <description>Optional. The keystore file format, default value is "jks".
-  </description>
-</property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-test/conf/ssl-server.xml.example
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/ssl-server.xml.example b/contrib/hawq-docker/centos6-docker/hawq-test/conf/ssl-server.xml.example
deleted file mode 100644
index 02d300c..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-test/conf/ssl-server.xml.example
+++ /dev/null
@@ -1,78 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-
-<property>
-  <name>ssl.server.truststore.location</name>
-  <value></value>
-  <description>Truststore to be used by NN and DN. Must be specified.
-  </description>
-</property>
-
-<property>
-  <name>ssl.server.truststore.password</name>
-  <value></value>
-  <description>Optional. Default value is "".
-  </description>
-</property>
-
-<property>
-  <name>ssl.server.truststore.type</name>
-  <value>jks</value>
-  <description>Optional. The keystore file format, default value is "jks".
-  </description>
-</property>
-
-<property>
-  <name>ssl.server.truststore.reload.interval</name>
-  <value>10000</value>
-  <description>Truststore reload check interval, in milliseconds.
-  Default value is 10000 (10 seconds).
-  </description>
-</property>
-
-<property>
-  <name>ssl.server.keystore.location</name>
-  <value></value>
-  <description>Keystore to be used by NN and DN. Must be specified.
-  </description>
-</property>
-
-<property>
-  <name>ssl.server.keystore.password</name>
-  <value></value>
-  <description>Must be specified.
-  </description>
-</property>
-
-<property>
-  <name>ssl.server.keystore.keypassword</name>
-  <value></value>
-  <description>Must be specified.
-  </description>
-</property>
-
-<property>
-  <name>ssl.server.keystore.type</name>
-  <value>jks</value>
-  <description>Optional. The keystore file format, default value is "jks".
-  </description>
-</property>
-
-</configuration>
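
The keystore and truststore entries in the two deleted ssl-*.xml examples have to point at real JKS files before SSL can be enabled. A minimal bash sketch of creating them with the stock JDK keytool (paths, alias, and passwords are placeholders, not values used anywhere in this repo):

    # sketch: generate a self-signed server keystore, then import its cert into a truststore
    keytool -genkeypair -alias hdfs-server -keyalg RSA -keysize 2048 \
            -keystore /etc/security/server.jks -storepass changeit -keypass changeit \
            -dname "CN=$(hostname -f)"
    keytool -exportcert -alias hdfs-server -keystore /etc/security/server.jks \
            -storepass changeit -file /tmp/hdfs-server.crt
    keytool -importcert -alias hdfs-server -file /tmp/hdfs-server.crt \
            -keystore /etc/security/truststore.jks -storepass changeit -noprompt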

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-test/conf/yarn-env.cmd
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/yarn-env.cmd b/contrib/hawq-docker/centos6-docker/hawq-test/conf/yarn-env.cmd
deleted file mode 100644
index 74da35b..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-test/conf/yarn-env.cmd
+++ /dev/null
@@ -1,60 +0,0 @@
-@echo off
-@rem Licensed to the Apache Software Foundation (ASF) under one or more
-@rem contributor license agreements.  See the NOTICE file distributed with
-@rem this work for additional information regarding copyright ownership.
-@rem The ASF licenses this file to You under the Apache License, Version 2.0
-@rem (the "License"); you may not use this file except in compliance with
-@rem the License.  You may obtain a copy of the License at
-@rem
-@rem     http://www.apache.org/licenses/LICENSE-2.0
-@rem
-@rem Unless required by applicable law or agreed to in writing, software
-@rem distributed under the License is distributed on an "AS IS" BASIS,
-@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-@rem See the License for the specific language governing permissions and
-@rem limitations under the License.
-
-@rem User for YARN daemons
-if not defined HADOOP_YARN_USER (
-  set HADOOP_YARN_USER=%yarn%
-)
-
-if not defined YARN_CONF_DIR (
-  set YARN_CONF_DIR=%HADOOP_YARN_HOME%\conf
-)
-
-if defined YARN_HEAPSIZE (
-  @rem echo run with Java heapsize %YARN_HEAPSIZE%
-  set JAVA_HEAP_MAX=-Xmx%YARN_HEAPSIZE%m
-)
-
-if not defined YARN_LOG_DIR (
-  set YARN_LOG_DIR=%HADOOP_YARN_HOME%\logs
-)
-
-if not defined YARN_LOGFILE (
-  set YARN_LOGFILE=yarn.log
-)
-
-@rem default policy file for service-level authorization
-if not defined YARN_POLICYFILE (
-  set YARN_POLICYFILE=hadoop-policy.xml
-)
-
-if not defined YARN_ROOT_LOGGER (
-  set YARN_ROOT_LOGGER=%HADOOP_LOGLEVEL%,console
-)
-
-set YARN_OPTS=%YARN_OPTS% -Dhadoop.log.dir=%YARN_LOG_DIR%
-set YARN_OPTS=%YARN_OPTS% -Dyarn.log.dir=%YARN_LOG_DIR%
-set YARN_OPTS=%YARN_OPTS% -Dhadoop.log.file=%YARN_LOGFILE%
-set YARN_OPTS=%YARN_OPTS% -Dyarn.log.file=%YARN_LOGFILE%
-set YARN_OPTS=%YARN_OPTS% -Dyarn.home.dir=%HADOOP_YARN_HOME%
-set YARN_OPTS=%YARN_OPTS% -Dyarn.id.str=%YARN_IDENT_STRING%
-set YARN_OPTS=%YARN_OPTS% -Dhadoop.home.dir=%HADOOP_YARN_HOME%
-set YARN_OPTS=%YARN_OPTS% -Dhadoop.root.logger=%YARN_ROOT_LOGGER%
-set YARN_OPTS=%YARN_OPTS% -Dyarn.root.logger=%YARN_ROOT_LOGGER%
-if defined JAVA_LIBRARY_PATH (
-  set YARN_OPTS=%YARN_OPTS% -Djava.library.path=%JAVA_LIBRARY_PATH%
-)
-set YARN_OPTS=%YARN_OPTS% -Dyarn.policy.file=%YARN_POLICYFILE%
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-test/entrypoint.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/entrypoint.sh b/contrib/hawq-docker/centos6-docker/hawq-test/entrypoint.sh
deleted file mode 100755
index 2c03287..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-test/entrypoint.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-if [ -z "${NAMENODE}" ]; then
-  export NAMENODE=${HOSTNAME}
-fi
-
-if [ ! -f /etc/profile.d/hadoop.sh ]; then
-  echo '#!/bin/bash' | sudo tee /etc/profile.d/hadoop.sh
-  echo "export NAMENODE=${NAMENODE}" | sudo tee -a /etc/profile.d/hadoop.sh
-  sudo chmod a+x /etc/profile.d/hadoop.sh
-fi
-
-sudo start-hdfs.sh
-sudo sysctl -p
-sudo ln -s /usr/lib/libthrift-0.9.1.so /usr/lib64/libthrift-0.9.1.so
-
-exec "$@"

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-test/start-hdfs.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/start-hdfs.sh b/contrib/hawq-docker/centos6-docker/hawq-test/start-hdfs.sh
deleted file mode 100755
index 076fb0a..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-test/start-hdfs.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-/etc/init.d/sshd start
-
-if [ -f /etc/profile.d/hadoop.sh ]; then
-  . /etc/profile.d/hadoop.sh
-fi
-
-if [ "${NAMENODE}" == "${HOSTNAME}" ]; then
-  if [ ! -d /tmp/hdfs/name/current ]; then
-    su -l hdfs -c "hdfs namenode -format"
-  fi
-  
-  if [ -z "`ps aux | grep org.apache.hadoop.hdfs.server.namenode.NameNode | grep -v grep`" ]; then
-    su -l hdfs -c "hadoop-daemon.sh start namenode"
-  fi
-else
-  if [ -z "`ps aux | grep org.apache.hadoop.hdfs.server.datanode.DataNode | grep -v grep`" ]; then
-    su -l hdfs -c "hadoop-daemon.sh start datanode"
-  fi
-fi
-

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-dev/Dockerfile
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-dev/Dockerfile b/contrib/hawq-docker/centos7-docker/hawq-dev/Dockerfile
deleted file mode 100644
index 58d4ef0..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-dev/Dockerfile
+++ /dev/null
@@ -1,75 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-FROM centos:7
-
-MAINTAINER Richard Guo <ri...@pivotal.io>
-
-# install all software we need
-RUN yum install -y epel-release && \
- yum makecache && \
- yum install -y man passwd sudo tar which git mlocate links make bzip2 net-tools \
- autoconf automake libtool m4 gcc gcc-c++ gdb bison flex cmake gperf maven indent \
- libuuid-devel krb5-devel libgsasl-devel expat-devel libxml2-devel \
- perl-ExtUtils-Embed pam-devel python-devel libcurl-devel snappy-devel \
- thrift-devel libyaml-devel libevent-devel bzip2-devel openssl-devel \
- openldap-devel protobuf-devel readline-devel net-snmp-devel apr-devel \
- libesmtp-devel python-pip json-c-devel \
- java-1.7.0-openjdk-devel lcov cmake \
- openssh-clients openssh-server perl-JSON && \
- yum clean all
-
-RUN pip --retries=50 --timeout=300 install pycrypto
-
-# OS requirement
-RUN echo "kernel.sem = 250 512000 100 2048" >> /etc/sysctl.conf
-
-# setup ssh server and keys for root
-RUN sshd-keygen && \
- ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa && \
- cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys && \
- chmod 0600 ~/.ssh/authorized_keys
-
-# create user gpadmin since HAWQ cannot run under root
-RUN groupadd -g 1000 gpadmin && \
- useradd -u 1000 -g 1000 gpadmin && \
- echo "gpadmin  ALL=(ALL)       NOPASSWD: ALL" > /etc/sudoers.d/gpadmin
-
-# sudo should not require tty
-RUN sed -i -e 's|Defaults    requiretty|#Defaults    requiretty|' /etc/sudoers
-
-# setup JAVA_HOME for all users
-RUN echo "#!/bin/sh" > /etc/profile.d/java.sh && \
- echo "export JAVA_HOME=/etc/alternatives/java_sdk" >> /etc/profile.d/java.sh && \
- chmod a+x /etc/profile.d/java.sh
-
-# set USER env
-RUN echo "#!/bin/bash" > /etc/profile.d/user.sh && \
- echo "export USER=\`whoami\`" >> /etc/profile.d/user.sh && \
- chmod a+x /etc/profile.d/user.sh
-
-ENV BASEDIR /data
-RUN mkdir -p /data && chmod 777 /data
-
-USER gpadmin
-
-# setup ssh client keys for gpadmin
-RUN ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa && \
- cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys && \
- chmod 0600 ~/.ssh/authorized_keys
-
-WORKDIR /data

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-test/Dockerfile
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/Dockerfile b/contrib/hawq-docker/centos7-docker/hawq-test/Dockerfile
deleted file mode 100644
index ea5e22c..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-test/Dockerfile
+++ /dev/null
@@ -1,40 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-FROM hawq/hawq-dev:centos7
-
-MAINTAINER Richard Guo <ri...@pivotal.io>
-
-USER root
-
-## install HDP 2.5.0
-RUN curl -L "http://public-repo-1.hortonworks.com/HDP/centos7/2.x/updates/2.5.0.0/hdp.repo" -o /etc/yum.repos.d/hdp.repo && \
- yum install -y hadoop hadoop-hdfs hadoop-libhdfs hadoop-yarn hadoop-mapreduce hadoop-client hdp-select && \
- yum clean all
-
-RUN ln -s /usr/hdp/current/hadoop-hdfs-namenode/../hadoop/sbin/hadoop-daemon.sh /usr/bin/hadoop-daemon.sh
-
-COPY conf/* /etc/hadoop/conf/
-
-COPY entrypoint.sh /usr/bin/entrypoint.sh
-COPY start-hdfs.sh /usr/bin/start-hdfs.sh
-
-USER gpadmin
-
-ENTRYPOINT ["entrypoint.sh"]
-CMD ["bash"]
-
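
Taken together, the two deleted centos7 Dockerfiles build a test image on top of hawq/hawq-dev:centos7 and expect a NAMENODE environment variable at run time (see entrypoint.sh). A rough usage sketch, with the directory paths taken from this repo layout and the container names purely illustrative:

    # sketch: build the dev image first, then the test image that layers HDP on top of it
    docker build -t hawq/hawq-dev:centos7  contrib/hawq-docker/centos7-docker/hawq-dev
    docker build -t hawq/hawq-test:centos7 contrib/hawq-docker/centos7-docker/hawq-test
    # run a namenode container, then point a second (datanode) container at it
    docker run -d --name namenode -h namenode -e NAMENODE=namenode hawq/hawq-test:centos7
    docker run -it --link namenode -e NAMENODE=namenode hawq/hawq-test:centos7 bash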

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-test/conf/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/capacity-scheduler.xml b/contrib/hawq-docker/centos7-docker/hawq-test/conf/capacity-scheduler.xml
deleted file mode 100644
index 30f4eb9..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-test/conf/capacity-scheduler.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<configuration>
-
-  <property>
-    <name>yarn.scheduler.capacity.maximum-applications</name>
-    <value>10000</value>
-    <description>
-      Maximum number of applications that can be pending and running.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
-    <value>0.1</value>
-    <description>
-      Maximum percent of resources in the cluster which can be used to run 
-      application masters i.e. controls number of concurrent running
-      applications.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.resource-calculator</name>
-    <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
-    <description>
-      The ResourceCalculator implementation to be used to compare 
-      Resources in the scheduler.
-      The default i.e. DefaultResourceCalculator only uses Memory while
-      DominantResourceCalculator uses dominant-resource to compare 
-      multi-dimensional resources such as Memory, CPU etc.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.queues</name>
-    <value>default</value>
-    <description>
-      The queues at the this level (root is the root queue).
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.capacity</name>
-    <value>100</value>
-    <description>Default queue target capacity.</description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
-    <value>1</value>
-    <description>
-      Default queue user limit a percentage from 0.0 to 1.0.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
-    <value>100</value>
-    <description>
-      The maximum capacity of the default queue. 
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.state</name>
-    <value>RUNNING</value>
-    <description>
-      The state of the default queue. State can be one of RUNNING or STOPPED.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
-    <value>*</value>
-    <description>
-      The ACL of who can submit jobs to the default queue.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
-    <value>*</value>
-    <description>
-      The ACL of who can administer jobs on the default queue.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.node-locality-delay</name>
-    <value>40</value>
-    <description>
-      Number of missed scheduling opportunities after which the CapacityScheduler 
-      attempts to schedule rack-local containers. 
-      Typically this should be set to number of nodes in the cluster, By default is setting 
-      approximately number of nodes in one rack which is 40.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.queue-mappings</name>
-    <value></value>
-    <description>
-      A list of mappings that will be used to assign jobs to queues
-      The syntax for this list is [u|g]:[name]:[queue_name][,next mapping]*
-      Typically this list will be used to map users to queues,
-      for example, u:%user:%user maps all users to queues with the same name
-      as the user.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.queue-mappings-override.enable</name>
-    <value>false</value>
-    <description>
-      If a queue mapping is present, will it override the value specified
-      by the user? This can be used by administrators to place jobs in queues
-      that are different than the one specified by the user.
-      The default is false.
-    </description>
-  </property>
-
-</configuration>
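
With only the default queue defined in the deleted capacity-scheduler.xml, jobs land there automatically, but the queue can also be named explicitly at submit time. A small bash sketch (the examples jar and job arguments are illustrative):

    # sketch: submit a MapReduce job to the capacity scheduler's default queue
    hadoop jar hadoop-mapreduce-examples.jar pi \
        -Dmapreduce.job.queuename=default 10 100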

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-test/conf/configuration.xsl
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/configuration.xsl b/contrib/hawq-docker/centos7-docker/hawq-test/conf/configuration.xsl
deleted file mode 100644
index d50d80b..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-test/conf/configuration.xsl
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-<xsl:output method="html"/>
-<xsl:template match="configuration">
-<html>
-<body>
-<table border="1">
-<tr>
- <td>name</td>
- <td>value</td>
- <td>description</td>
-</tr>
-<xsl:for-each select="property">
-<tr>
-  <td><a name="{name}"><xsl:value-of select="name"/></a></td>
-  <td><xsl:value-of select="value"/></td>
-  <td><xsl:value-of select="description"/></td>
-</tr>
-</xsl:for-each>
-</table>
-</body>
-</html>
-</xsl:template>
-</xsl:stylesheet>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-test/conf/container-executor.cfg
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/container-executor.cfg b/contrib/hawq-docker/centos7-docker/hawq-test/conf/container-executor.cfg
deleted file mode 100644
index d68cee8..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-test/conf/container-executor.cfg
+++ /dev/null
@@ -1,4 +0,0 @@
-yarn.nodemanager.linux-container-executor.group=#configured value of yarn.nodemanager.linux-container-executor.group
-banned.users=#comma separated list of users who can not run applications
-min.user.id=1000#Prevent other super-users
-allowed.system.users=##comma separated list of system users who CAN run applications

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-test/conf/core-site.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/core-site.xml b/contrib/hawq-docker/centos7-docker/hawq-test/conf/core-site.xml
deleted file mode 100644
index afc37fc..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-test/conf/core-site.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-	<property>
-		<name>fs.defaultFS</name>
-		<value>hdfs://${hdfs.namenode}:8020</value>
-	</property>
-</configuration>
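
fs.defaultFS in the deleted core-site.xml is parameterized on the hdfs.namenode system property, which the accompanying hadoop-env.sh fills in from the NAMENODE environment variable (exported via /etc/profile.d/hadoop.sh by entrypoint.sh), so clients inside the container resolve the filesystem without editing this file. A bash sketch of exercising that, with the hostname illustrative:

    # sketch: NAMENODE is normally exported by /etc/profile.d/hadoop.sh inside the container
    export NAMENODE=namenode
    hdfs dfs -ls hdfs://${NAMENODE}:8020/   # the same URI that fs.defaultFS expands to
    hdfs dfs -ls /                          # relies on the expanded fs.defaultFS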

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-env.cmd
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-env.cmd b/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-env.cmd
deleted file mode 100644
index bb40ec9..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-env.cmd
+++ /dev/null
@@ -1,92 +0,0 @@
-@echo off
-@rem Licensed to the Apache Software Foundation (ASF) under one or more
-@rem contributor license agreements.  See the NOTICE file distributed with
-@rem this work for additional information regarding copyright ownership.
-@rem The ASF licenses this file to You under the Apache License, Version 2.0
-@rem (the "License"); you may not use this file except in compliance with
-@rem the License.  You may obtain a copy of the License at
-@rem
-@rem     http://www.apache.org/licenses/LICENSE-2.0
-@rem
-@rem Unless required by applicable law or agreed to in writing, software
-@rem distributed under the License is distributed on an "AS IS" BASIS,
-@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-@rem See the License for the specific language governing permissions and
-@rem limitations under the License.
-
-@rem Set Hadoop-specific environment variables here.
-
-@rem The only required environment variable is JAVA_HOME.  All others are
-@rem optional.  When running a distributed configuration it is best to
-@rem set JAVA_HOME in this file, so that it is correctly defined on
-@rem remote nodes.
-
-@rem The java implementation to use.  Required.
-set JAVA_HOME=%JAVA_HOME%
-
-@rem The jsvc implementation to use. Jsvc is required to run secure datanodes.
-@rem set JSVC_HOME=%JSVC_HOME%
-
-@rem set HADOOP_CONF_DIR=
-
-@rem Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
-if exist %HADOOP_HOME%\contrib\capacity-scheduler (
-  if not defined HADOOP_CLASSPATH (
-    set HADOOP_CLASSPATH=%HADOOP_HOME%\contrib\capacity-scheduler\*.jar
-  ) else (
-    set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%HADOOP_HOME%\contrib\capacity-scheduler\*.jar
-  )
-)
-
-@rem If TEZ_CLASSPATH is defined in the env, that means that TEZ is enabled
-@rem append it to the HADOOP_CLASSPATH
-
-if defined TEZ_CLASSPATH (
-  if not defined HADOOP_CLASSPATH (
-    set HADOOP_CLASSPATH=%TEZ_CLASSPATH%
-  ) else (
-    set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%TEZ_CLASSPATH%
-  )
-)
-
-@rem The maximum amount of heap to use, in MB. Default is 1000.
-@rem set HADOOP_HEAPSIZE=
-@rem set HADOOP_NAMENODE_INIT_HEAPSIZE=""
-
-@rem Extra Java runtime options.  Empty by default.
-@rem set HADOOP_OPTS=%HADOOP_OPTS% -Djava.net.preferIPv4Stack=true
-
-@rem Command specific options appended to HADOOP_OPTS when specified
-if not defined HADOOP_SECURITY_LOGGER (
-  set HADOOP_SECURITY_LOGGER=INFO,RFAS
-)
-if not defined HDFS_AUDIT_LOGGER (
-  set HDFS_AUDIT_LOGGER=INFO,NullAppender
-)
-
-set HADOOP_NAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_NAMENODE_OPTS%
-set HADOOP_DATANODE_OPTS=-Dhadoop.security.logger=ERROR,RFAS %HADOOP_DATANODE_OPTS%
-set HADOOP_SECONDARYNAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_SECONDARYNAMENODE_OPTS%
-
-@rem The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-set HADOOP_CLIENT_OPTS=-Xmx512m %HADOOP_CLIENT_OPTS%
-@rem set HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData %HADOOP_JAVA_PLATFORM_OPTS%"
-
-@rem On secure datanodes, user to run the datanode as after dropping privileges
-set HADOOP_SECURE_DN_USER=%HADOOP_SECURE_DN_USER%
-
-@rem Where log files are stored.  %HADOOP_HOME%/logs by default.
-@rem set HADOOP_LOG_DIR=%HADOOP_LOG_DIR%\%USERNAME%
-
-@rem Where log files are stored in the secure data environment.
-set HADOOP_SECURE_DN_LOG_DIR=%HADOOP_LOG_DIR%\%HADOOP_HDFS_USER%
-
-@rem The directory where pid files are stored. /tmp by default.
-@rem NOTE: this should be set to a directory that can only be written to by 
-@rem       the user that will run the hadoop daemons.  Otherwise there is the
-@rem       potential for a symlink attack.
-set HADOOP_PID_DIR=%HADOOP_PID_DIR%
-set HADOOP_SECURE_DN_PID_DIR=%HADOOP_PID_DIR%
-
-@rem A string representing this instance of hadoop. %USERNAME% by default.
-set HADOOP_IDENT_STRING=%USERNAME%

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-env.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-env.sh b/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-env.sh
deleted file mode 100644
index 95511ed..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-env.sh
+++ /dev/null
@@ -1,110 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Set Hadoop-specific environment variables here.
-
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
-
-# The java implementation to use.
-export JAVA_HOME=/etc/alternatives/java_sdk
-
-# The jsvc implementation to use. Jsvc is required to run secure datanodes
-# that bind to privileged ports to provide authentication of data transfer
-# protocol.  Jsvc is not required if SASL is configured for authentication of
-# data transfer protocol using non-privileged ports.
-#export JSVC_HOME=${JSVC_HOME}
-
-#export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
-
-# Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
-#for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
-#  if [ "$HADOOP_CLASSPATH" ]; then
-#    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
-#  else
-#    export HADOOP_CLASSPATH=$f
-#  fi
-#done
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-#export HADOOP_HEAPSIZE=
-#export HADOOP_NAMENODE_INIT_HEAPSIZE=""
-
-# Setup environment variable for docker image
-if [ -f /etc/profile.d/hadoop.sh ]; then
-  . /etc/profile.d/hadoop.sh
-fi
-
-if [ -z "${NAMENODE}" ]; then
-  echo "environment variable NAMENODE is not set!"
-  exit 1
-fi
-
-# Extra Java runtime options.  Empty by default.
-export HADOOP_OPTS="-Dhdfs.namenode=${NAMENODE}"
-#export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
-
-# Command specific options appended to HADOOP_OPTS when specified
-#export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
-#export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
-
-#export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
-
-#export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
-#export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"
-
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-#export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
-#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
-
-# On secure datanodes, user to run the datanode as after dropping privileges.
-# This **MUST** be uncommented to enable secure HDFS if using privileged ports
-# to provide authentication of data transfer protocol.  This **MUST NOT** be
-# defined if SASL is configured for authentication of data transfer protocol
-# using non-privileged ports.
-#export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
-
-# Where log files are stored.  $HADOOP_HOME/logs by default.
-export HADOOP_LOG_DIR=/var/log/hadoop
-export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
-
-# Where log files are stored in the secure data environment.
-#export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
-
-###
-# HDFS Mover specific parameters
-###
-# Specify the JVM options to be used when starting the HDFS Mover.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# export HADOOP_MOVER_OPTS=""
-
-###
-# Advanced Users Only!
-###
-
-# The directory where pid files are stored. /tmp by default.
-# NOTE: this should be set to a directory that can only be written to by
-#       the user that will run the hadoop daemons.  Otherwise there is the
-#       potential for a symlink attack.
-#export HADOOP_PID_DIR=${HADOOP_PID_DIR}
-#export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
-
-# A string representing this instance of hadoop. $USER by default.
-#export HADOOP_IDENT_STRING=$USER

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-metrics.properties
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-metrics.properties b/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-metrics.properties
deleted file mode 100644
index c1b2eb7..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-metrics.properties
+++ /dev/null
@@ -1,75 +0,0 @@
-# Configuration of the "dfs" context for null
-dfs.class=org.apache.hadoop.metrics.spi.NullContext
-
-# Configuration of the "dfs" context for file
-#dfs.class=org.apache.hadoop.metrics.file.FileContext
-#dfs.period=10
-#dfs.fileName=/tmp/dfsmetrics.log
-
-# Configuration of the "dfs" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-# dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-# dfs.period=10
-# dfs.servers=localhost:8649
-
-
-# Configuration of the "mapred" context for null
-mapred.class=org.apache.hadoop.metrics.spi.NullContext
-
-# Configuration of the "mapred" context for file
-#mapred.class=org.apache.hadoop.metrics.file.FileContext
-#mapred.period=10
-#mapred.fileName=/tmp/mrmetrics.log
-
-# Configuration of the "mapred" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-# mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-# mapred.period=10
-# mapred.servers=localhost:8649
-
-
-# Configuration of the "jvm" context for null
-#jvm.class=org.apache.hadoop.metrics.spi.NullContext
-
-# Configuration of the "jvm" context for file
-#jvm.class=org.apache.hadoop.metrics.file.FileContext
-#jvm.period=10
-#jvm.fileName=/tmp/jvmmetrics.log
-
-# Configuration of the "jvm" context for ganglia
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-# jvm.period=10
-# jvm.servers=localhost:8649
-
-# Configuration of the "rpc" context for null
-rpc.class=org.apache.hadoop.metrics.spi.NullContext
-
-# Configuration of the "rpc" context for file
-#rpc.class=org.apache.hadoop.metrics.file.FileContext
-#rpc.period=10
-#rpc.fileName=/tmp/rpcmetrics.log
-
-# Configuration of the "rpc" context for ganglia
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-# rpc.period=10
-# rpc.servers=localhost:8649
-
-
-# Configuration of the "ugi" context for null
-ugi.class=org.apache.hadoop.metrics.spi.NullContext
-
-# Configuration of the "ugi" context for file
-#ugi.class=org.apache.hadoop.metrics.file.FileContext
-#ugi.period=10
-#ugi.fileName=/tmp/ugimetrics.log
-
-# Configuration of the "ugi" context for ganglia
-# ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-# ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-# ugi.period=10
-# ugi.servers=localhost:8649
-

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-metrics2.properties
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-metrics2.properties b/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-metrics2.properties
deleted file mode 100644
index 0c09228..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-metrics2.properties
+++ /dev/null
@@ -1,68 +0,0 @@
-# syntax: [prefix].[source|sink].[instance].[options]
-# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
-
-*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
-# default sampling period, in seconds
-*.period=10
-
-# The namenode-metrics.out will contain metrics from all context
-#namenode.sink.file.filename=namenode-metrics.out
-# Specifying a special sampling period for namenode:
-#namenode.sink.*.period=8
-
-#datanode.sink.file.filename=datanode-metrics.out
-
-#resourcemanager.sink.file.filename=resourcemanager-metrics.out
-
-#nodemanager.sink.file.filename=nodemanager-metrics.out
-
-#mrappmaster.sink.file.filename=mrappmaster-metrics.out
-
-#jobhistoryserver.sink.file.filename=jobhistoryserver-metrics.out
-
-# the following example split metrics of different
-# context to different sinks (in this case files)
-#nodemanager.sink.file_jvm.class=org.apache.hadoop.metrics2.sink.FileSink
-#nodemanager.sink.file_jvm.context=jvm
-#nodemanager.sink.file_jvm.filename=nodemanager-jvm-metrics.out
-#nodemanager.sink.file_mapred.class=org.apache.hadoop.metrics2.sink.FileSink
-#nodemanager.sink.file_mapred.context=mapred
-#nodemanager.sink.file_mapred.filename=nodemanager-mapred-metrics.out
-
-#
-# Below are for sending metrics to Ganglia
-#
-# for Ganglia 3.0 support
-# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink30
-#
-# for Ganglia 3.1 support
-# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-
-# *.sink.ganglia.period=10
-
-# default for supportsparse is false
-# *.sink.ganglia.supportsparse=true
-
-#*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-#*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-# Tag values to use for the ganglia prefix. If not defined no tags are used.
-# If '*' all tags are used. If specifiying multiple tags separate them with 
-# commas. Note that the last segment of the property name is the context name.
-#
-#*.sink.ganglia.tagsForPrefix.jvm=ProcesName
-#*.sink.ganglia.tagsForPrefix.dfs=
-#*.sink.ganglia.tagsForPrefix.rpc=
-#*.sink.ganglia.tagsForPrefix.mapred=
-
-#namenode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
-
-#datanode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
-
-#resourcemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
-
-#nodemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
-
-#mrappmaster.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
-
-#jobhistoryserver.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-policy.xml b/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-policy.xml
deleted file mode 100644
index 2bf5c02..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-policy.xml
+++ /dev/null
@@ -1,226 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
- 
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
- <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.user.mappings.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    users mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank. For
-    e.g. "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.ha.service.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HAService protocol used by HAAdmin to manage the
-      active and stand-by states of namenode.</description>
-  </property>
-
-  <property>
-    <name>security.zkfc.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for access to the ZK Failover Controller
-    </description>
-  </property>
-
-  <property>
-    <name>security.qjournal.service.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for QJournalProtocol, used by the NN to communicate with
-    JNs when using the QuorumJournalManager for edit logs.</description>
-  </property>
-
-  <property>
-    <name>security.mrhs.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HSClientProtocol, used by job clients to
-    communciate with the MR History Server job status etc. 
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <!-- YARN Protocols -->
-
-  <property>
-    <name>security.resourcetracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ResourceTrackerProtocol, used by the
-    ResourceManager and NodeManager to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.resourcemanager-administration.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ResourceManagerAdministrationProtocol, for admin commands. 
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.applicationclient.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ApplicationClientProtocol, used by the ResourceManager 
-    and applications submission clients to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.applicationmaster.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ApplicationMasterProtocol, used by the ResourceManager 
-    and ApplicationMasters to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.containermanagement.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ContainerManagementProtocol protocol, used by the NodeManager 
-    and ApplicationMasters to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.resourcelocalizer.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ResourceLocalizer protocol, used by the NodeManager 
-    and ResourceLocalizer to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.task.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for MRClientProtocol, used by job clients to
-    communciate with the MR ApplicationMaster to query job status etc. 
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.applicationhistory.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ApplicationHistoryProtocol, used by the timeline
-    server and the generic history service client to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-test/conf/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/hdfs-site.xml b/contrib/hawq-docker/centos7-docker/hawq-test/conf/hdfs-site.xml
deleted file mode 100644
index 3f4f152..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-test/conf/hdfs-site.xml
+++ /dev/null
@@ -1,100 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-	<property>
-		<name>dfs.name.dir</name>
-		<value>/tmp/hdfs/name</value>
-		<final>true</final>
-	</property>
-
-	<property>
-		<name>dfs.data.dir</name>
-		<value>/tmp/hdfs/data</value>
-		<final>true</final>
-	</property>
-
-	<property>
-		<name>dfs.permissions</name>
-		<value>true</value>
-	</property>
-
-	<property>
-		<name>dfs.support.append</name>
-		<value>true</value>
-	</property>
-
-	<property>
-		<name>dfs.block.local-path-access.user</name>
-		<value>${user.name}</value>
-	</property>
-
-	<property>
-		<name>dfs.replication</name>
-		<value>3</value>
-	</property>
-
-	<property>
-		<name>dfs.datanode.socket.write.timeout</name>
-		<value>0</value>
-		<description>
-			used for sockets to and from datanodes. It is 8 minutes by default. Some
-			users set this to 0, effectively disabling the write timeout.
-		</description>
-	</property>
-
-	<property>
-		<name>dfs.webhdfs.enabled</name>
-		<value>true</value>
-	</property>
-
-	<property>
-		<name>dfs.allow.truncate</name>
-		<value>true</value>
-	</property>
-
-	<property>
-		<name>dfs.namenode.fs-limits.min-block-size</name>
-		<value>1024</value>
-	</property>
-
-	<property>
-		<name>dfs.client.read.shortcircuit</name>
-		<value>true</value>
-	</property>
-
-	<property>
-		<name>dfs.domain.socket.path</name>
-		<value>/var/lib/hadoop-hdfs/dn_socket</value>
-	</property>
-
-	<property>
-		<name>dfs.block.access.token.enable</name>
-		<value>true</value>
-		<description>
-			If "true", access tokens are used as capabilities for accessing
-			datanodes.
-			If "false", no access tokens are checked on accessing datanodes.
-		</description>
-	</property>
-	
-	<property>
-		<name>dfs.namenode.datanode.registration.ip-hostname-check</name>
-		<value>false</value>
-	</property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-acls.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-acls.xml b/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-acls.xml
deleted file mode 100644
index cba69f4..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-acls.xml
+++ /dev/null
@@ -1,135 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<configuration>
-
-  <!-- This file is hot-reloaded when it changes -->
-
-  <!-- KMS ACLs -->
-
-  <property>
-    <name>hadoop.kms.acl.CREATE</name>
-    <value>*</value>
-    <description>
-      ACL for create-key operations.
-      If the user is not in the GET ACL, the key material is not returned
-      as part of the response.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.acl.DELETE</name>
-    <value>*</value>
-    <description>
-      ACL for delete-key operations.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.acl.ROLLOVER</name>
-    <value>*</value>
-    <description>
-      ACL for rollover-key operations.
-      If the user is not in the GET ACL, the key material is not returned
-      as part of the response.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.acl.GET</name>
-    <value>*</value>
-    <description>
-      ACL for get-key-version and get-current-key operations.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.acl.GET_KEYS</name>
-    <value>*</value>
-    <description>
-      ACL for get-keys operations.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.acl.GET_METADATA</name>
-    <value>*</value>
-    <description>
-      ACL for get-key-metadata and get-keys-metadata operations.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.acl.SET_KEY_MATERIAL</name>
-    <value>*</value>
-    <description>
-      Complementary ACL for CREATE and ROLLOVER operations to allow the client
-      to provide the key material when creating or rolling a key.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.acl.GENERATE_EEK</name>
-    <value>*</value>
-    <description>
-      ACL for generateEncryptedKey CryptoExtension operations.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.acl.DECRYPT_EEK</name>
-    <value>*</value>
-    <description>
-      ACL for decryptEncryptedKey CryptoExtension operations.
-    </description>
-  </property>
-
-  <property>
-    <name>default.key.acl.MANAGEMENT</name>
-    <value>*</value>
-    <description>
-      default ACL for MANAGEMENT operations for all key acls that are not
-      explicitly defined.
-    </description>
-  </property>
-
-  <property>
-    <name>default.key.acl.GENERATE_EEK</name>
-    <value>*</value>
-    <description>
-      default ACL for GENERATE_EEK operations for all key acls that are not
-      explicitly defined.
-    </description>
-  </property>
-
-  <property>
-    <name>default.key.acl.DECRYPT_EEK</name>
-    <value>*</value>
-    <description>
-      default ACL for DECRYPT_EEK operations for all key acls that are not
-      explicitly defined.
-    </description>
-  </property>
-
-  <property>
-    <name>default.key.acl.READ</name>
-    <value>*</value>
-    <description>
-      default ACL for READ operations for all key acls that are not
-      explicitly defined.
-    </description>
-  </property>
-
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-env.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-env.sh b/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-env.sh
deleted file mode 100644
index 44dfe6a..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-env.sh
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/bin/bash
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License. See accompanying LICENSE file.
-#
-
-# Set kms specific environment variables here.
-
-# Settings for the Embedded Tomcat that runs KMS
-# Java System properties for KMS should be specified in this variable
-#
-# export CATALINA_OPTS=
-
-# KMS logs directory
-#
-# export KMS_LOG=${KMS_HOME}/logs
-
-# KMS temporary directory
-#
-# export KMS_TEMP=${KMS_HOME}/temp
-
-# The HTTP port used by KMS
-#
-# export KMS_HTTP_PORT=16000
-
-# The Admin port used by KMS
-#
-# export KMS_ADMIN_PORT=`expr ${KMS_HTTP_PORT} + 1`
-
-# The maximum number of Tomcat handler threads
-#
-# export KMS_MAX_THREADS=1000
-
-# The location of the SSL keystore if using SSL
-#
-# export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore
-
-# The password of the SSL keystore if using SSL
-#
-# export KMS_SSL_KEYSTORE_PASS=password
-
-# The full path to any native libraries that need to be loaded
-# (For eg. location of natively compiled tomcat Apache portable
-# runtime (APR) libraries
-#
-# export JAVA_LIBRARY_PATH=${HOME}/lib/native

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-log4j.properties
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-log4j.properties b/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-log4j.properties
deleted file mode 100644
index 8e6d909..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-log4j.properties
+++ /dev/null
@@ -1,38 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License. See accompanying LICENSE file.
-#
-
-# If the Java System property 'kms.log.dir' is not defined at KMS start up time
-# Setup sets its value to '${kms.home}/logs'
-
-log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.kms.DatePattern='.'yyyy-MM-dd
-log4j.appender.kms.File=${kms.log.dir}/kms.log
-log4j.appender.kms.Append=true
-log4j.appender.kms.layout=org.apache.log4j.PatternLayout
-log4j.appender.kms.layout.ConversionPattern=%d{ISO8601} %-5p %c{1} - %m%n
-
-log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.kms-audit.DatePattern='.'yyyy-MM-dd
-log4j.appender.kms-audit.File=${kms.log.dir}/kms-audit.log
-log4j.appender.kms-audit.Append=true
-log4j.appender.kms-audit.layout=org.apache.log4j.PatternLayout
-log4j.appender.kms-audit.layout.ConversionPattern=%d{ISO8601} %m%n
-
-log4j.logger.kms-audit=INFO, kms-audit
-log4j.additivity.kms-audit=false
-
-log4j.rootLogger=ALL, kms
-log4j.logger.org.apache.hadoop.conf=ERROR
-log4j.logger.org.apache.hadoop=INFO
-log4j.logger.com.sun.jersey.server.wadl.generators.WadlGeneratorJAXBGrammarGenerator=OFF
\ No newline at end of file


[06/50] [abbrv] incubator-hawq git commit: Revert "HAWQ-1242. hawq-site.xml default content has wrong guc variable names"

Posted by es...@apache.org.
Revert "HAWQ-1242. hawq-site.xml default content has wrong guc variable names"

This reverts commit 61646cd55fddb4ef3feafe9fd125763796d518c0.


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/94239f5e
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/94239f5e
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/94239f5e

Branch: refs/heads/2.1.0.0-incubating
Commit: 94239f5ed5ecf39e1650ec4b5d348ec9a907edc9
Parents: 61646cd
Author: Yi <yj...@pivotal.io>
Authored: Fri Dec 30 16:06:44 2016 +1100
Committer: Yi <yj...@pivotal.io>
Committed: Fri Dec 30 16:06:44 2016 +1100

----------------------------------------------------------------------
 src/backend/utils/misc/etc/hawq-site.xml          | 8 ++++----
 src/backend/utils/misc/etc/template-hawq-site.xml | 8 ++++----
 2 files changed, 8 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/94239f5e/src/backend/utils/misc/etc/hawq-site.xml
----------------------------------------------------------------------
diff --git a/src/backend/utils/misc/etc/hawq-site.xml b/src/backend/utils/misc/etc/hawq-site.xml
index 012ccbf..b327ab4 100644
--- a/src/backend/utils/misc/etc/hawq-site.xml
+++ b/src/backend/utils/misc/etc/hawq-site.xml
@@ -33,7 +33,7 @@ under the License.
 	</property>
 
 	<property>
-		<name>standby_address_host</name>
+		<name>hawq_standby_address_host</name>
 		<value>none</value>
 		<description>The host name of hawq standby master.</description>
 	</property>
@@ -45,19 +45,19 @@ under the License.
 	</property>
 
 	<property>
-		<name>dfs_url</name>
+		<name>hawq_dfs_url</name>
 		<value>localhost:8020/hawq_default</value>
 		<description>URL for accessing HDFS.</description>
 	</property>
 
 	<property>
-		<name>master_directory</name>
+		<name>hawq_master_directory</name>
 		<value>~/hawq-data-directory/masterdd</value>
 		<description>The directory of hawq master.</description>
 	</property>
 
 	<property>
-		<name>segment_directory</name>
+		<name>hawq_segment_directory</name>
 		<value>~/hawq-data-directory/segmentdd</value>
 		<description>The directory of hawq segment.</description>
 	</property> 

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/94239f5e/src/backend/utils/misc/etc/template-hawq-site.xml
----------------------------------------------------------------------
diff --git a/src/backend/utils/misc/etc/template-hawq-site.xml b/src/backend/utils/misc/etc/template-hawq-site.xml
index 1901db3..cf6273a 100644
--- a/src/backend/utils/misc/etc/template-hawq-site.xml
+++ b/src/backend/utils/misc/etc/template-hawq-site.xml
@@ -33,7 +33,7 @@ under the License.
 	</property>
 
 	<property>
-		<name>standby_address_host</name>
+		<name>hawq_standby_address_host</name>
 		<value>%standby.host%</value>
 		<description>The host name of hawq standby master.</description>
 	</property>
@@ -45,19 +45,19 @@ under the License.
 	</property>
 
 	<property>
-		<name>dfs_url</name>
+		<name>hawq_dfs_url</name>
 		<value>%namenode.host%:%namenode.port%/%hawq.file.space%</value>
 		<description>URL for accessing HDFS.</description>
 	</property>
 
 	<property>
-		<name>master_directory</name>
+		<name>hawq_master_directory</name>
 		<value>%master.directory%</value>
 		<description>The directory of hawq master.</description>
 	</property>
 
 	<property>
-		<name>segment_directory</name>
+		<name>hawq_segment_directory</name>
 		<value>%segment.directory%</value>
 		<description>The directory of hawq segment.</description>
 	</property> 


[48/50] [abbrv] incubator-hawq git commit: HAWQ-1309. Use default value for pxf user/port

Posted by es...@apache.org.
HAWQ-1309. Use default value for pxf user/port


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/f6452d27
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/f6452d27
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/f6452d27

Branch: refs/heads/2.1.0.0-incubating
Commit: f6452d273885619673f9e7c754d133e17252a38c
Parents: 42c1cc1
Author: Shivram Mani <sh...@gmail.com>
Authored: Thu Feb 2 14:08:55 2017 -0800
Committer: Kavinder Dhaliwal <ka...@gmail.com>
Committed: Thu Feb 2 14:26:50 2017 -0800

----------------------------------------------------------------------
 pxf/pxf-service/src/scripts/pxf-service | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/f6452d27/pxf/pxf-service/src/scripts/pxf-service
----------------------------------------------------------------------
diff --git a/pxf/pxf-service/src/scripts/pxf-service b/pxf/pxf-service/src/scripts/pxf-service
index 2d63310..39d167e 100644
--- a/pxf/pxf-service/src/scripts/pxf-service
+++ b/pxf/pxf-service/src/scripts/pxf-service
@@ -34,8 +34,8 @@ else
 	source $env_script
 fi
 
-pxf_user=$PXF_USER
-instance_port=$PXF_PORT
+pxf_user=${PXF_USER:-pxf}
+instance_port=${PXF_PORT:-51200}
 instance_name=pxf-service
 
 if [ -z $PXF_HOME ]; then
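
For readers less familiar with the shell idiom above: ${PXF_USER:-pxf} and ${PXF_PORT:-51200} substitute the literal defaults whenever the variable is unset or empty. A rough Java analogue of that behaviour is sketched below; the class itself is a hypothetical illustration and not part of the HAWQ/PXF source tree.

    // Hypothetical illustration only; mirrors the ${VAR:-default} expansions in the diff above.
    public class PxfDefaultsSketch {
        public static void main(String[] args) {
            // Fall back to "pxf" / "51200" when the variable is unset or empty, like ${PXF_USER:-pxf}.
            String user = System.getenv("PXF_USER");
            String port = System.getenv("PXF_PORT");
            String pxfUser = (user == null || user.isEmpty()) ? "pxf" : user;
            String instancePort = (port == null || port.isEmpty()) ? "51200" : port;
            System.out.println("pxf_user=" + pxfUser + ", instance_port=" + instancePort);
        }
    }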


[42/50] [abbrv] incubator-hawq git commit: HAWQ-1294. Removed ALL privilege from HAWQ Ranger definition

Posted by es...@apache.org.
HAWQ-1294. Removed ALL privilege from HAWQ Ranger definition

(closes #1101)


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/d3983eb5
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/d3983eb5
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/d3983eb5

Branch: refs/heads/2.1.0.0-incubating
Commit: d3983eb56312b076f2e5fff000cda17f027da291
Parents: 8a5e65b
Author: Alexander Denissov <ad...@pivotal.io>
Authored: Wed Jan 25 11:12:48 2017 -0800
Committer: Alexander Denissov <ad...@pivotal.io>
Committed: Thu Jan 26 11:59:13 2017 -0800

----------------------------------------------------------------------
 ranger-plugin/conf/ranger-servicedef-hawq.json   | 19 -------------------
 .../authorization/model/HawqPrivilege.java       |  3 +--
 2 files changed, 1 insertion(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/d3983eb5/ranger-plugin/conf/ranger-servicedef-hawq.json
----------------------------------------------------------------------
diff --git a/ranger-plugin/conf/ranger-servicedef-hawq.json b/ranger-plugin/conf/ranger-servicedef-hawq.json
index 03005bc..5e22ad1 100644
--- a/ranger-plugin/conf/ranger-servicedef-hawq.json
+++ b/ranger-plugin/conf/ranger-servicedef-hawq.json
@@ -212,25 +212,6 @@
       "itemId": 12,
       "name": "usage-schema",
       "label": "usage-schema"
-    },
-    {
-      "itemId": 13,
-      "name": "all",
-      "label": "All",
-      "impliedGrants": [
-        "select",
-        "insert",
-        "update",
-        "delete",
-        "references",
-        "usage",
-        "create",
-        "connect",
-        "execute",
-        "temp",
-        "create-schema",
-        "usage-schema"
-      ]
     }
   ],
 

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/d3983eb5/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/model/HawqPrivilege.java
----------------------------------------------------------------------
diff --git a/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/model/HawqPrivilege.java b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/model/HawqPrivilege.java
index ffce67a..2e44582 100644
--- a/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/model/HawqPrivilege.java
+++ b/ranger-plugin/service/src/main/java/org/apache/hawq/ranger/authorization/model/HawqPrivilege.java
@@ -37,8 +37,7 @@ public enum HawqPrivilege {
     execute,
     temp,
     create_schema,
-    usage_schema,
-    all;
+    usage_schema;
 
     /**
      * Returns HawqPrivilege type by case-insensitive lookup of the value.
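
A hedged sketch of what the removal means for callers: with no aggregate constant left in the enum, policies and tests have to list concrete privileges. Only create_schema and usage_schema are taken from the diff above; the wrapper class is a hypothetical illustration, not code in the ranger-plugin module.

    import java.util.EnumSet;
    import org.apache.hawq.ranger.authorization.model.HawqPrivilege;

    // Hypothetical illustration; not part of the ranger-plugin module.
    public class HawqPrivilegeSketch {
        public static void main(String[] args) {
            // Schema-level privileges are now granted explicitly instead of via "all".
            EnumSet<HawqPrivilege> schemaPrivs =
                    EnumSet.of(HawqPrivilege.create_schema, HawqPrivilege.usage_schema);
            // complementOf() covers "everything else" without needing an "all" constant.
            EnumSet<HawqPrivilege> remaining = EnumSet.complementOf(schemaPrivs);
            System.out.println(schemaPrivs + " plus " + remaining.size() + " other privileges");
        }
    }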


[36/50] [abbrv] incubator-hawq git commit: HAWQ-1282. Shared Input Scan may result in endless loop.

Posted by es...@apache.org.
HAWQ-1282. Shared Input Scan may result in endless loop.


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/efa1230a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/efa1230a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/efa1230a

Branch: refs/heads/2.1.0.0-incubating
Commit: efa1230a95133c954f843f4ced46fab6acdfd7d2
Parents: 326fa4f
Author: hubertzhang <hu...@apache.org>
Authored: Thu Jan 19 13:58:48 2017 +0800
Committer: hubertzhang <hu...@apache.org>
Committed: Thu Jan 19 16:02:32 2017 +0800

----------------------------------------------------------------------
 src/backend/executor/nodeShareInputScan.c | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/efa1230a/src/backend/executor/nodeShareInputScan.c
----------------------------------------------------------------------
diff --git a/src/backend/executor/nodeShareInputScan.c b/src/backend/executor/nodeShareInputScan.c
index cb303be..0f08848 100644
--- a/src/backend/executor/nodeShareInputScan.c
+++ b/src/backend/executor/nodeShareInputScan.c
@@ -640,8 +640,13 @@ read_retry:
 	else if(sz == 0 || errno == EINTR)
 		goto read_retry;
 	else
+	{
+		if(fd >= 0)
+		{
+			gp_retry_close(fd);
+		}
 		elog(ERROR, "could not read from fifo: %m");
-
+	}
 	Assert(!"Never be here");
 	return 0;
 }
@@ -658,7 +663,13 @@ write_retry:
 	else if(sz == 0 || errno == EINTR)
 		goto write_retry;
 	else
+	{
+		if(fd >= 0)
+		{
+			gp_retry_close(fd);
+		}
 		elog(ERROR, "could not write to fifo: %m");
+	}
 
 	Assert(!"Never be here");
 	return 0;
@@ -914,6 +925,10 @@ writer_wait_for_acks(ShareInput_Lk_Context *pctxt, int share_id, int xslice)
 			int save_errno = errno;
 			elog(LOG, "SISC WRITER (shareid=%d, slice=%d): notify still wait for an answer, errno %d",
 					share_id, currentSliceId, save_errno);
+			/*if error(except EINTR) happens in select, we just return to avoid endless loop*/
+			if(errno != EINTR){
+				return;
+			}
 		}
 	}
 }


[50/50] [abbrv] incubator-hawq git commit: [HAWQ-1236] - Update HAWQ DB version to 2.1.0.0-incubating.

Posted by es...@apache.org.
[HAWQ-1236] - Update HAWQ DB version to 2.1.0.0-incubating.


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/12c7df01
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/12c7df01
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/12c7df01

Branch: refs/heads/2.1.0.0-incubating
Commit: 12c7df017551f1c3b0deb38c7243db3e018ef62c
Parents: 7d02472
Author: Ed Espino <ee...@pivotal.io>
Authored: Fri Dec 23 20:32:44 2016 -0800
Committer: Ed Espino <ee...@pivotal.io>
Committed: Fri Feb 3 00:47:14 2017 -0800

----------------------------------------------------------------------
 getversion | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/12c7df01/getversion
----------------------------------------------------------------------
diff --git a/getversion b/getversion
index 6aa6adf..ac43748 100755
--- a/getversion
+++ b/getversion
@@ -18,7 +18,7 @@
 # under the License.
 #
 
-GP_VERSION="2.1.0.0"
+GP_VERSION="2.1.0.0-incubating"
 
 GP_BUILDNUMBER=dev
 if [ -f BUILD_NUMBER ] ; then


[45/50] [abbrv] incubator-hawq git commit: HAWQ-1228. Use profile based on file format in HCatalog integration (HiveRC, HiveText profiles).


Posted by es...@apache.org.
HAWQ-1228. Use profile based on file format in HCatalog integration (HiveRC, HiveText profiles).


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/6fa1ced2
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/6fa1ced2
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/6fa1ced2

Branch: refs/heads/2.1.0.0-incubating
Commit: 6fa1ced20e8bb2820b73e6904f77c4b4a1ed6de2
Parents: aac8868
Author: Oleksandr Diachenko <od...@pivotal.io>
Authored: Mon Jan 30 23:38:06 2017 -0800
Committer: Oleksandr Diachenko <od...@pivotal.io>
Committed: Mon Jan 30 23:38:50 2017 -0800

----------------------------------------------------------------------
 pxf/gradle.properties                           |   2 +-
 .../java/org/apache/hawq/pxf/api/Metadata.java  |  51 +++-
 .../org/apache/hawq/pxf/api/OutputFormat.java   |  37 ++-
 .../hawq/pxf/api/utilities/InputData.java       |   1 +
 .../org/apache/hawq/pxf/api/MetadataTest.java   |   2 +-
 .../hawq/pxf/plugins/hive/HiveAccessor.java     |  14 +-
 .../plugins/hive/HiveColumnarSerdeResolver.java |  60 ++---
 .../pxf/plugins/hive/HiveDataFragmenter.java    |  28 +-
 .../plugins/hive/HiveInputFormatFragmenter.java |  41 ---
 .../pxf/plugins/hive/HiveLineBreakAccessor.java |  10 +-
 .../pxf/plugins/hive/HiveMetadataFetcher.java   |  85 +++---
 .../hawq/pxf/plugins/hive/HiveORCAccessor.java  |   9 +-
 .../pxf/plugins/hive/HiveORCSerdeResolver.java  |  44 +---
 .../pxf/plugins/hive/HiveRCFileAccessor.java    |  10 +-
 .../hawq/pxf/plugins/hive/HiveResolver.java     | 107 ++++----
 .../plugins/hive/HiveStringPassResolver.java    |  39 ++-
 .../hawq/pxf/plugins/hive/HiveUserData.java     | 135 ++++++++++
 .../hive/utilities/EnumHiveToHawqType.java      |  31 ++-
 .../plugins/hive/utilities/HiveUtilities.java   | 263 +++++++++++++++----
 .../plugins/hive/utilities/ProfileFactory.java  |  61 +++++
 .../plugins/hive/HiveMetadataFetcherTest.java   |   3 +
 .../pxf/plugins/hive/HiveORCAccessorTest.java   |   9 +-
 .../hive/utilities/HiveUtilitiesTest.java       |  53 ++++
 .../hive/utilities/ProfileFactoryTest.java      |  65 +++++
 .../hawq/pxf/service/BridgeOutputBuilder.java   |   8 +-
 .../pxf/service/MetadataResponseFormatter.java  |   3 +-
 .../apache/hawq/pxf/service/ProfileFactory.java |  45 ----
 .../hawq/pxf/service/rest/MetadataResource.java |   9 +-
 .../hawq/pxf/service/rest/VersionResource.java  |   2 +-
 .../pxf/service/utilities/ProtocolData.java     |  22 +-
 .../src/main/resources/pxf-profiles-default.xml |  14 +-
 .../service/MetadataResponseFormatterTest.java  |  16 +-
 src/backend/access/external/fileam.c            |   3 +
 src/backend/access/external/pxfheaders.c        |  21 +-
 .../access/external/test/pxfheaders_test.c      |  18 ++
 src/backend/catalog/external/externalmd.c       | 137 +++++++---
 src/bin/gpfusion/gpbridgeapi.c                  |   6 +-
 src/include/access/hd_work_mgr.h                |   2 +
 src/include/access/pxfheaders.h                 |   1 +
 src/include/access/pxfuriparser.h               |   2 +-
 src/include/catalog/external/itemmd.h           |   5 +
 src/include/catalog/pg_exttable.h               |  14 +-
 .../regress/data/hcatalog/single_table.json     |   2 +-
 .../data/hcatalog/single_table_text.json        |   1 +
 src/test/regress/input/json_load.source         |  12 +-
 src/test/regress/json_utils.c                   |  24 +-
 src/test/regress/output/json_load.source        |  35 ++-
 47 files changed, 1109 insertions(+), 453 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/gradle.properties
----------------------------------------------------------------------
diff --git a/pxf/gradle.properties b/pxf/gradle.properties
index b003c56..2af17ef 100644
--- a/pxf/gradle.properties
+++ b/pxf/gradle.properties
@@ -23,5 +23,5 @@ hiveVersion=1.2.1
 hbaseVersionJar=1.1.2
 hbaseVersionRPM=1.1.2
 tomcatVersion=7.0.62
-pxfProtocolVersion=v14
+pxfProtocolVersion=v15
 osFamily=el6

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-api/src/main/java/org/apache/hawq/pxf/api/Metadata.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-api/src/main/java/org/apache/hawq/pxf/api/Metadata.java b/pxf/pxf-api/src/main/java/org/apache/hawq/pxf/api/Metadata.java
index 9e1c137..bb22d41 100644
--- a/pxf/pxf-api/src/main/java/org/apache/hawq/pxf/api/Metadata.java
+++ b/pxf/pxf-api/src/main/java/org/apache/hawq/pxf/api/Metadata.java
@@ -22,6 +22,8 @@ package org.apache.hawq.pxf.api;
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
+import java.util.Set;
 
 import org.apache.hawq.pxf.api.utilities.EnumHawqType;
 import org.apache.commons.lang.StringUtils;
@@ -68,14 +70,16 @@ public class Metadata {
     }
 
     /**
-     * Class representing item field - name, type, source type, modifiers.
+     * Class representing item field - name, type, source type, is complex type?, modifiers.
      * Type - exposed type of field
      * Source type - type of field in underlying source
+     * Is complex type - whether source type is complex type
      * Modifiers - additional attributes which describe type or field
      */
     public static class Field {
         private String name;
         private EnumHawqType type; // field type which PXF exposes
+        private boolean isComplexType; // whether source field's type is complex
         private String sourceType; // field type PXF reads from
         private String[] modifiers; // type modifiers, optional field
 
@@ -91,12 +95,17 @@ public class Metadata {
             this.sourceType = sourceType;
         }
 
-        public Field(String name, EnumHawqType type, String sourceType,
-                String[] modifiers) {
+        public Field(String name, EnumHawqType type, String sourceType, String[] modifiers) {
             this(name, type, sourceType);
             this.modifiers = modifiers;
         }
 
+        public Field(String name, EnumHawqType type, boolean isComplexType, String sourceType, String[] modifiers) {
+            this(name, type, sourceType);
+            this.modifiers = modifiers;
+            this.isComplexType = isComplexType;
+        }
+
         public String getName() {
             return name;
         }
@@ -112,6 +121,14 @@ public class Metadata {
         public String[] getModifiers() {
             return modifiers;
         }
+
+        public boolean isComplexType() {
+            return isComplexType;
+        }
+
+        public void setComplexType(boolean isComplexType) {
+            this.isComplexType = isComplexType;
+        }
     }
 
     /**
@@ -123,6 +140,34 @@ public class Metadata {
      * Item's fields
      */
     private List<Metadata.Field> fields;
+    private Set<OutputFormat> outputFormats;
+    private Map<String, String> outputParameters;
+
+    /**
+     * Returns an item's output formats, @see OutputFormat.
+     *
+     * @return item's output formats
+     */
+    public Set<OutputFormat> getOutputFormats() {
+        return outputFormats;
+    }
+
+    public void setOutputFormats(Set<OutputFormat> outputFormats) {
+        this.outputFormats = outputFormats;
+    }
+
+    /**
+     * Returns an item's output parameters, for example - delimiters etc.
+     *
+     * @return item's output parameters
+     */
+    public Map<String, String> getOutputParameters() {
+        return outputParameters;
+    }
+
+    public void setOutputParameters(Map<String, String> outputParameters) {
+        this.outputParameters = outputParameters;
+    }
 
     /**
      * Constructs an item's Metadata.
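
To make the new five-argument Field constructor and the output-format/parameter setters concrete, here is a hedged sketch. The constructor signature, setOutputFormats/setOutputParameters, and the DELIMITER key name come from the diffs in this commit (OutputFormat constants appear in the next file's diff); the helper class, the complex-type heuristic, and the delimiter value "44" are assumptions for illustration only.

    import java.util.EnumSet;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hawq.pxf.api.Metadata;
    import org.apache.hawq.pxf.api.OutputFormat;
    import org.apache.hawq.pxf.api.utilities.EnumHawqType;

    // Hypothetical illustration; not part of pxf-api.
    public class MetadataFieldSketch {
        // Uses the new five-argument constructor; the caller supplies the HAWQ type,
        // so no specific EnumHawqType constant is assumed here.
        static Metadata.Field describeColumn(String name, EnumHawqType hawqType, String hiveType) {
            // Treating Hive array/map/struct/uniontype columns as complex is an assumption.
            boolean isComplex = hiveType.startsWith("array") || hiveType.startsWith("map")
                    || hiveType.startsWith("struct") || hiveType.startsWith("uniontype");
            return new Metadata.Field(name, hawqType, isComplex, hiveType, null);
        }

        // Attaches the formats and parameters an item can now expose.
        static void describeOutputs(Metadata item) {
            item.setOutputFormats(EnumSet.of(OutputFormat.TEXT, OutputFormat.GPDBWritable));
            Map<String, String> params = new HashMap<String, String>();
            params.put("DELIMITER", "44"); // key mirrors InputData.DELIMITER_KEY; "44" (comma) is assumed
            item.setOutputParameters(params);
        }
    }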

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-api/src/main/java/org/apache/hawq/pxf/api/OutputFormat.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-api/src/main/java/org/apache/hawq/pxf/api/OutputFormat.java b/pxf/pxf-api/src/main/java/org/apache/hawq/pxf/api/OutputFormat.java
index 230f9ff..565db13 100644
--- a/pxf/pxf-api/src/main/java/org/apache/hawq/pxf/api/OutputFormat.java
+++ b/pxf/pxf-api/src/main/java/org/apache/hawq/pxf/api/OutputFormat.java
@@ -21,6 +21,39 @@ package org.apache.hawq.pxf.api;
 
 
 /**
- * PXF supported output formats: {@link #TEXT} and {@link #BINARY}
+ * PXF supported output formats: {@link org.apache.hawq.pxf.service.io.Text} and {@link org.apache.hawq.pxf.service.io.GPDBWritable}
  */
-public enum OutputFormat {TEXT, BINARY}
+public enum OutputFormat {
+    TEXT("org.apache.hawq.pxf.service.io.Text"),
+    GPDBWritable("org.apache.hawq.pxf.service.io.GPDBWritable");
+
+    private String className;
+
+    OutputFormat(String className) {
+        this.className = className;
+    }
+
+    /**
+     * Returns a formats's implementation class name
+     *
+     * @return a formats's implementation class name
+     */
+    public String getClassName() {
+        return className;
+    }
+
+    /**
+     * Looks up output format for given class name if it exists.
+     *
+     * @throws UnsupportedTypeException if output format with given class wasn't found
+     * @return an output format with given class name
+     */
+    public static OutputFormat getOutputFormat(String className) {
+        for (OutputFormat of : values()) {
+            if (of.getClassName().equals(className)) {
+                return of;
+            }
+        }
+        throw new UnsupportedTypeException("Unable to find output format by given class name: " + className);
+    }
+}
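
A brief hedged usage sketch of the lookup helper added above; the enum constants and methods are those shown in the diff, while the wrapper class and the unknown class name are illustrative assumptions.

    import org.apache.hawq.pxf.api.OutputFormat;
    import org.apache.hawq.pxf.api.UnsupportedTypeException;

    // Hypothetical illustration; not part of pxf-api.
    public class OutputFormatLookupSketch {
        public static void main(String[] args) {
            // Resolve the enum constant from the wire-level class name.
            OutputFormat text = OutputFormat.getOutputFormat("org.apache.hawq.pxf.service.io.Text");
            System.out.println(text + " -> " + text.getClassName());
            try {
                OutputFormat.getOutputFormat("org.example.Unknown"); // hypothetical name
            } catch (UnsupportedTypeException e) {
                // Unknown class names are rejected rather than mapped to a default.
                System.out.println("rejected: " + e.getMessage());
            }
        }
    }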

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-api/src/main/java/org/apache/hawq/pxf/api/utilities/InputData.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-api/src/main/java/org/apache/hawq/pxf/api/utilities/InputData.java b/pxf/pxf-api/src/main/java/org/apache/hawq/pxf/api/utilities/InputData.java
index 5afedca..9816fdc 100644
--- a/pxf/pxf-api/src/main/java/org/apache/hawq/pxf/api/utilities/InputData.java
+++ b/pxf/pxf-api/src/main/java/org/apache/hawq/pxf/api/utilities/InputData.java
@@ -31,6 +31,7 @@ import java.util.*;
  */
 public class InputData {
 
+    public static final String DELIMITER_KEY = "DELIMITER";
     public static final int INVALID_SPLIT_IDX = -1;
     private static final Log LOG = LogFactory.getLog(InputData.class);
 

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-api/src/test/java/org/apache/hawq/pxf/api/MetadataTest.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-api/src/test/java/org/apache/hawq/pxf/api/MetadataTest.java b/pxf/pxf-api/src/test/java/org/apache/hawq/pxf/api/MetadataTest.java
index 327a15b..9244ba2 100644
--- a/pxf/pxf-api/src/test/java/org/apache/hawq/pxf/api/MetadataTest.java
+++ b/pxf/pxf-api/src/test/java/org/apache/hawq/pxf/api/MetadataTest.java
@@ -32,7 +32,7 @@ public class MetadataTest {
     @Test
     public void createFieldEmptyNameType() {
         try {
-            Metadata.Field field = new Metadata.Field(null, null, null, null);
+            Metadata.Field field = new Metadata.Field(null, null, false, null, null);
             fail("Empty name, type and source type shouldn't be allowed.");
         } catch (IllegalArgumentException e) {
             assertEquals("Field name, type and source type cannot be empty", e.getMessage());

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveAccessor.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveAccessor.java b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveAccessor.java
index ef9f76e..ea3accb 100644
--- a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveAccessor.java
+++ b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveAccessor.java
@@ -28,6 +28,7 @@ import org.apache.hawq.pxf.api.UnsupportedTypeException;
 import org.apache.hawq.pxf.api.utilities.ColumnDescriptor;
 import org.apache.hawq.pxf.api.utilities.InputData;
 import org.apache.hawq.pxf.plugins.hdfs.HdfsSplittableDataAccessor;
+import org.apache.hawq.pxf.plugins.hive.utilities.HiveUtilities;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.mapred.InputFormat;
@@ -42,10 +43,6 @@ import java.util.Arrays;
 import java.util.LinkedList;
 import java.util.List;
 
-import static org.apache.hawq.pxf.api.io.DataType.*;
-import static org.apache.hawq.pxf.api.io.DataType.BPCHAR;
-import static org.apache.hawq.pxf.api.io.DataType.BYTEA;
-
 /**
  * Accessor for Hive tables. The accessor will open and read a split belonging
  * to a Hive table. Opening a split means creating the corresponding InputFormat
@@ -138,12 +135,11 @@ public class HiveAccessor extends HdfsSplittableDataAccessor {
      */
     private InputFormat<?, ?> createInputFormat(InputData input)
             throws Exception {
-        String userData = new String(input.getFragmentUserData());
-        String[] toks = userData.split(HiveDataFragmenter.HIVE_UD_DELIM);
-        initPartitionFields(toks[3]);
-        filterInFragmenter = new Boolean(toks[4]);
+        HiveUserData hiveUserData = HiveUtilities.parseHiveUserData(input);
+        initPartitionFields(hiveUserData.getPartitionKeys());
+        filterInFragmenter = hiveUserData.isFilterInFragmenter();
         return HiveDataFragmenter.makeInputFormat(
-                toks[0]/* inputFormat name */, jobConf);
+                hiveUserData.getInputFormatName()/* inputFormat name */, jobConf);
     }
 
     /*

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveColumnarSerdeResolver.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveColumnarSerdeResolver.java b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveColumnarSerdeResolver.java
index 362ac0d..7d85efe 100644
--- a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveColumnarSerdeResolver.java
+++ b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveColumnarSerdeResolver.java
@@ -22,12 +22,15 @@ package org.apache.hawq.pxf.plugins.hive;
 import org.apache.hawq.pxf.api.BadRecordException;
 import org.apache.hawq.pxf.api.OneField;
 import org.apache.hawq.pxf.api.OneRow;
+import org.apache.hawq.pxf.api.OutputFormat;
 import org.apache.hawq.pxf.api.UnsupportedTypeException;
 import org.apache.hawq.pxf.api.io.DataType;
 import org.apache.hawq.pxf.api.utilities.ColumnDescriptor;
 import org.apache.hawq.pxf.api.utilities.InputData;
 import org.apache.hawq.pxf.api.utilities.Utilities;
 import org.apache.hawq.pxf.plugins.hive.utilities.HiveUtilities;
+import org.apache.hawq.pxf.plugins.hive.utilities.HiveUtilities.PXF_HIVE_SERDES;
+import org.apache.hawq.pxf.service.utilities.ProtocolData;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -40,7 +43,6 @@ import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.StructField;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.*;
-
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.mapred.JobConf;
 
@@ -57,11 +59,10 @@ import static org.apache.hawq.pxf.api.io.DataType.VARCHAR;
  */
 public class HiveColumnarSerdeResolver extends HiveResolver {
     private static final Log LOG = LogFactory.getLog(HiveColumnarSerdeResolver.class);
-    private ColumnarSerDeBase deserializer;
     private boolean firstColumn;
     private StringBuilder builder;
     private StringBuilder parts;
-    private HiveInputFormatFragmenter.PXF_HIVE_SERDES serdeType;
+    private HiveUtilities.PXF_HIVE_SERDES serdeType;
 
     public HiveColumnarSerdeResolver(InputData input) throws Exception {
         super(input);
@@ -70,24 +71,22 @@ public class HiveColumnarSerdeResolver extends HiveResolver {
     /* read the data supplied by the fragmenter: inputformat name, serde name, partition keys */
     @Override
     void parseUserData(InputData input) throws Exception {
-        String[] toks = HiveInputFormatFragmenter.parseToks(input);
-        String serdeEnumStr = toks[HiveInputFormatFragmenter.TOK_SERDE];
-        if (serdeEnumStr.equals(HiveInputFormatFragmenter.PXF_HIVE_SERDES.COLUMNAR_SERDE.name())) {
-            serdeType = HiveInputFormatFragmenter.PXF_HIVE_SERDES.COLUMNAR_SERDE;
-        } else if (serdeEnumStr.equals(HiveInputFormatFragmenter.PXF_HIVE_SERDES.LAZY_BINARY_COLUMNAR_SERDE.name())) {
-            serdeType = HiveInputFormatFragmenter.PXF_HIVE_SERDES.LAZY_BINARY_COLUMNAR_SERDE;
-        }
-        else {
-            throw new UnsupportedTypeException("Unsupported Hive Serde: " + serdeEnumStr);
-        }
+        HiveUserData hiveUserData = HiveUtilities.parseHiveUserData(input, HiveUtilities.PXF_HIVE_SERDES.COLUMNAR_SERDE, HiveUtilities.PXF_HIVE_SERDES.LAZY_BINARY_COLUMNAR_SERDE);
+        String serdeClassName = hiveUserData.getSerdeClassName();
+
+        serdeType = PXF_HIVE_SERDES.getPxfHiveSerde(serdeClassName);
         parts = new StringBuilder();
-        partitionKeys = toks[HiveInputFormatFragmenter.TOK_KEYS];
+        partitionKeys = hiveUserData.getPartitionKeys();
         parseDelimiterChar(input);
     }
 
     @Override
     void initPartitionFields() {
-        initPartitionFields(parts);
+        if (((ProtocolData) inputData).outputFormat() == OutputFormat.TEXT) {
+            initTextPartitionFields(parts);
+        } else {
+            super.initPartitionFields();
+        }
     }
 
     /**
@@ -97,15 +96,19 @@ public class HiveColumnarSerdeResolver extends HiveResolver {
      */
     @Override
     public List<OneField> getFields(OneRow onerow) throws Exception {
-        firstColumn = true;
-        builder = new StringBuilder();
-        Object tuple = deserializer.deserialize((Writable) onerow.getData());
-        ObjectInspector oi = deserializer.getObjectInspector();
-
-        traverseTuple(tuple, oi);
-        /* We follow Hive convention. Partition fields are always added at the end of the record */
-        builder.append(parts);
-        return Collections.singletonList(new OneField(VARCHAR.getOID(), builder.toString()));
+        if (((ProtocolData) inputData).outputFormat() == OutputFormat.TEXT) {
+            firstColumn = true;
+            builder = new StringBuilder();
+            Object tuple = deserializer.deserialize((Writable) onerow.getData());
+            ObjectInspector oi = deserializer.getObjectInspector();
+    
+            traverseTuple(tuple, oi);
+            /* We follow Hive convention. Partition fields are always added at the end of the record */
+            builder.append(parts);
+            return Collections.singletonList(new OneField(VARCHAR.getOID(), builder.toString()));
+        } else {
+            return super.getFields(onerow);
+        }
     }
 
     /*
@@ -138,14 +141,7 @@ public class HiveColumnarSerdeResolver extends HiveResolver {
         serdeProperties.put(serdeConstants.LIST_COLUMNS, columnNames.toString());
         serdeProperties.put(serdeConstants.LIST_COLUMN_TYPES, columnTypes.toString());
 
-        if (serdeType == HiveInputFormatFragmenter.PXF_HIVE_SERDES.COLUMNAR_SERDE) {
-            deserializer = new ColumnarSerDe();
-        } else if (serdeType == HiveInputFormatFragmenter.PXF_HIVE_SERDES.LAZY_BINARY_COLUMNAR_SERDE) {
-            deserializer = new LazyBinaryColumnarSerDe();
-        } else {
-            throw new UnsupportedTypeException("Unsupported Hive Serde: " + serdeType.name()); /* we should not get here */
-        }
-
+        deserializer = HiveUtilities.createDeserializer(serdeType, HiveUtilities.PXF_HIVE_SERDES.COLUMNAR_SERDE, HiveUtilities.PXF_HIVE_SERDES.LAZY_BINARY_COLUMNAR_SERDE);
         deserializer.initialize(new JobConf(new Configuration(), HiveColumnarSerdeResolver.class), serdeProperties);
     }
 

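Editorial note: the resolver above now branches on the requested output format. For TEXT it keeps serializing the whole record into a single delimited VARCHAR field, appending partition values at the end per Hive convention; for any other format it falls back to the parent HiveResolver's field-by-field path. A minimal sketch of that dispatch, using plain Java stand-ins (Format, BaseResolver and ColumnarResolver are invented names, not the real PXF classes):

    // Hypothetical stand-ins for OutputFormat, HiveResolver and HiveColumnarSerdeResolver.
    enum Format { TEXT, GPDB_WRITABLE }

    class BaseResolver {
        java.util.List<String> getFields(String row) {
            // generic per-field resolution (stand-in for HiveResolver.getFields)
            return java.util.Arrays.asList(row.split(","));
        }
    }

    class ColumnarResolver extends BaseResolver {
        private final Format format;
        private final String partitionSuffix;

        ColumnarResolver(Format format, String partitionSuffix) {
            this.format = format;
            this.partitionSuffix = partitionSuffix;
        }

        @Override
        java.util.List<String> getFields(String row) {
            if (format == Format.TEXT) {
                // TEXT: one delimited string, partition values appended last
                return java.util.Collections.singletonList(row + partitionSuffix);
            }
            return super.getFields(row);   // other formats: generic path
        }
    }
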
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveDataFragmenter.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveDataFragmenter.java b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveDataFragmenter.java
index 2d2b53e..a03d3b7 100644
--- a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveDataFragmenter.java
+++ b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveDataFragmenter.java
@@ -59,7 +59,7 @@ import org.apache.hawq.pxf.api.utilities.InputData;
 import org.apache.hawq.pxf.api.utilities.ProfilesConf;
 import org.apache.hawq.pxf.plugins.hdfs.utilities.HdfsUtilities;
 import org.apache.hawq.pxf.plugins.hive.utilities.HiveUtilities;
-import org.apache.hawq.pxf.service.ProfileFactory;
+import org.apache.hawq.pxf.plugins.hive.utilities.ProfileFactory;
 
 /**
  * Fragmenter class for HIVE tables. <br>
@@ -78,7 +78,6 @@ public class HiveDataFragmenter extends Fragmenter {
     private static final Log LOG = LogFactory.getLog(HiveDataFragmenter.class);
     private static final short ALL_PARTS = -1;
 
-    public static final String HIVE_UD_DELIM = "!HUDD!";
     public static final String HIVE_1_PART_DELIM = "!H1PD!";
     public static final String HIVE_PARTITIONS_DELIM = "!HPAD!";
     public static final String HIVE_NO_PART_TBL = "!HNPT!";
@@ -163,6 +162,10 @@ public class HiveDataFragmenter extends Fragmenter {
 
         Table tbl = HiveUtilities.getHiveTable(client, tblDesc);
 
+        Metadata metadata = new Metadata(tblDesc);
+        HiveUtilities.getSchema(tbl, metadata);
+        boolean hasComplexTypes = HiveUtilities.hasComplexTypes(metadata);
+
         verifySchema(tbl);
 
         List<Partition> partitions = null;
@@ -228,7 +231,7 @@ public class HiveDataFragmenter extends Fragmenter {
 
         if (partitions.isEmpty()) {
             props = getSchema(tbl);
-            fetchMetaDataForSimpleTable(descTable, props);
+            fetchMetaDataForSimpleTable(descTable, props, hasComplexTypes);
         } else {
             List<FieldSchema> partitionKeys = tbl.getPartitionKeys();
 
@@ -239,7 +242,7 @@ public class HiveDataFragmenter extends Fragmenter {
                         tblDesc.getPath(), tblDesc.getName(),
                         partitionKeys);
                 fetchMetaDataForPartitionedTable(descPartition, props,
-                        partition, partitionKeys, tblDesc.getName());
+                        partition, partitionKeys, tblDesc.getName(), hasComplexTypes);
             }
         }
     }
@@ -255,29 +258,30 @@ public class HiveDataFragmenter extends Fragmenter {
     }
 
     private void fetchMetaDataForSimpleTable(StorageDescriptor stdsc,
-                                             Properties props) throws Exception {
-        fetchMetaDataForSimpleTable(stdsc, props, null);
+                                             Properties props, boolean hasComplexTypes) throws Exception {
+        fetchMetaDataForSimpleTable(stdsc, props, null, hasComplexTypes);
     }
 
     private void fetchMetaDataForSimpleTable(StorageDescriptor stdsc,
-                                             Properties props, String tableName)
+                                             Properties props, String tableName, boolean hasComplexTypes)
             throws Exception {
         fetchMetaData(new HiveTablePartition(stdsc, props, null, null,
-                tableName));
+                tableName), hasComplexTypes);
     }
 
     private void fetchMetaDataForPartitionedTable(StorageDescriptor stdsc,
                                                   Properties props,
                                                   Partition partition,
                                                   List<FieldSchema> partitionKeys,
-                                                  String tableName)
+                                                  String tableName,
+                                                  boolean hasComplexTypes)
             throws Exception {
         fetchMetaData(new HiveTablePartition(stdsc, props, partition,
-                partitionKeys, tableName));
+                partitionKeys, tableName), hasComplexTypes);
     }
 
     /* Fills a table partition */
-    private void fetchMetaData(HiveTablePartition tablePartition)
+    private void fetchMetaData(HiveTablePartition tablePartition, boolean hasComplexTypes)
             throws Exception {
         InputFormat<?, ?> fformat = makeInputFormat(
                 tablePartition.storageDesc.getInputFormat(), jobConf);
@@ -285,7 +289,7 @@ public class HiveDataFragmenter extends Fragmenter {
         if (inputData.getProfile() != null) {
             // evaluate optimal profile based on file format if profile was explicitly specified in url
             // if user passed accessor+fragmenter+resolver - use them
-            profile = ProfileFactory.get(fformat);
+            profile = ProfileFactory.get(fformat, hasComplexTypes);
         }
         String fragmenterForProfile = null;
         if (profile != null) {

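Editorial note: the fragmenter now computes hasComplexTypes once from the table schema and threads it into ProfileFactory.get(fformat, hasComplexTypes), so profile selection can avoid optimized read paths when the table has complex columns. The exact rules live in ProfileFactory; the sketch below is only an assumed illustration of the idea (the profile names and the ORC check are assumptions, not the confirmed implementation):

    // Hypothetical sketch: pick a read profile from the input format class
    // name and a flag saying whether the schema contains complex types.
    static String chooseProfile(String inputFormatClass, boolean hasComplexTypes) {
        if (inputFormatClass.endsWith("OrcInputFormat") && !hasComplexTypes) {
            // assumed: optimized ORC profile only without array/map/struct/union columns
            return "HiveORC";
        }
        return "Hive";   // assumed generic profile for everything else
    }
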
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveInputFormatFragmenter.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveInputFormatFragmenter.java b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveInputFormatFragmenter.java
index ca4501b..9199118 100644
--- a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveInputFormatFragmenter.java
+++ b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveInputFormatFragmenter.java
@@ -55,11 +55,6 @@ import java.util.Properties;
  */
 public class HiveInputFormatFragmenter extends HiveDataFragmenter {
     private static final Log LOG = LogFactory.getLog(HiveInputFormatFragmenter.class);
-    private static final int EXPECTED_NUM_OF_TOKS = 4;
-    public static final int TOK_SERDE = 0;
-    public static final int TOK_KEYS = 1;
-    public static final int TOK_FILTER_DONE = 2;
-    public static final int TOK_COL_TYPES = 3;
 
     /** Defines the Hive input formats currently supported in pxf */
     public enum PXF_HIVE_INPUT_FORMATS {
@@ -68,14 +63,6 @@ public class HiveInputFormatFragmenter extends HiveDataFragmenter {
         ORC_FILE_INPUT_FORMAT
     }
 
-    /** Defines the Hive serializers (serde classes) currently supported in pxf */
-    public enum PXF_HIVE_SERDES {
-        COLUMNAR_SERDE,
-        LAZY_BINARY_COLUMNAR_SERDE,
-        LAZY_SIMPLE_SERDE,
-        ORC_SERDE
-    }
-
     /**
      * Constructs a HiveInputFormatFragmenter.
      *
@@ -85,34 +72,6 @@ public class HiveInputFormatFragmenter extends HiveDataFragmenter {
         super(inputData, HiveInputFormatFragmenter.class);
     }
 
-    /**
-     * Extracts the user data:
-     * serde, partition keys and whether filter was included in fragmenter
-     *
-     * @param input input data from client
-     * @param supportedSerdes supported serde names
-     * @return parsed tokens
-     * @throws UserDataException if user data contains unsupported serde
-     *                           or wrong number of tokens
-     */
-    static public String[] parseToks(InputData input, String... supportedSerdes)
-            throws UserDataException {
-        String userData = new String(input.getFragmentUserData());
-        String[] toks = userData.split(HIVE_UD_DELIM);
-        if (supportedSerdes.length > 0
-                && !Arrays.asList(supportedSerdes).contains(toks[TOK_SERDE])) {
-            throw new UserDataException(toks[TOK_SERDE]
-                    + " serializer isn't supported by " + input.getAccessor());
-        }
-
-        if (toks.length != (EXPECTED_NUM_OF_TOKS)) {
-            throw new UserDataException("HiveInputFormatFragmenter expected "
-                    + EXPECTED_NUM_OF_TOKS + " tokens, but got " + toks.length);
-        }
-
-        return toks;
-    }
-
     /*
      * Checks that hive fields and partitions match the HAWQ schema. Throws an
      * exception if: - the number of fields (+ partitions) do not match the HAWQ

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveLineBreakAccessor.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveLineBreakAccessor.java b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveLineBreakAccessor.java
index ed4f908..66680bb 100644
--- a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveLineBreakAccessor.java
+++ b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveLineBreakAccessor.java
@@ -21,12 +21,12 @@ package org.apache.hawq.pxf.plugins.hive;
 
 
 import org.apache.hawq.pxf.api.utilities.InputData;
-
+import org.apache.hawq.pxf.plugins.hive.utilities.HiveUtilities;
 import org.apache.hadoop.mapred.*;
 
 import java.io.IOException;
 
-import static org.apache.hawq.pxf.plugins.hive.HiveInputFormatFragmenter.PXF_HIVE_SERDES;
+import static org.apache.hawq.pxf.plugins.hive.utilities.HiveUtilities.PXF_HIVE_SERDES;
 
 /**
  * Specialization of HiveAccessor for a Hive table stored as Text files.
@@ -43,9 +43,9 @@ public class HiveLineBreakAccessor extends HiveAccessor {
     public HiveLineBreakAccessor(InputData input) throws Exception {
         super(input, new TextInputFormat());
         ((TextInputFormat) inputFormat).configure(jobConf);
-        String[] toks = HiveInputFormatFragmenter.parseToks(input, PXF_HIVE_SERDES.LAZY_SIMPLE_SERDE.name());
-        initPartitionFields(toks[HiveInputFormatFragmenter.TOK_KEYS]);
-        filterInFragmenter = new Boolean(toks[HiveInputFormatFragmenter.TOK_FILTER_DONE]);
+        HiveUserData hiveUserData = HiveUtilities.parseHiveUserData(input, PXF_HIVE_SERDES.LAZY_SIMPLE_SERDE);
+        initPartitionFields(hiveUserData.getPartitionKeys());
+        filterInFragmenter = hiveUserData.isFilterInFragmenter();
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveMetadataFetcher.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveMetadataFetcher.java b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveMetadataFetcher.java
index 91f91e7..dc76289 100644
--- a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveMetadataFetcher.java
+++ b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveMetadataFetcher.java
@@ -21,33 +21,49 @@ package org.apache.hawq.pxf.plugins.hive;
 
 
 import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
+import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
-
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hawq.pxf.api.Metadata;
 import org.apache.hawq.pxf.api.MetadataFetcher;
+import org.apache.hawq.pxf.api.OutputFormat;
 import org.apache.hawq.pxf.api.UnsupportedTypeException;
 import org.apache.hawq.pxf.api.utilities.InputData;
+import org.apache.hawq.pxf.api.utilities.ProfilesConf;
 import org.apache.hawq.pxf.plugins.hive.utilities.HiveUtilities;
+import org.apache.hawq.pxf.plugins.hive.utilities.ProfileFactory;
 
 /**
  * Class for connecting to Hive's MetaStore and getting schema of Hive tables.
  */
 public class HiveMetadataFetcher extends MetadataFetcher {
 
+    private static final String DELIM_FIELD = InputData.DELIMITER_KEY;
+
     private static final Log LOG = LogFactory.getLog(HiveMetadataFetcher.class);
     private HiveMetaStoreClient client;
+    private JobConf jobConf;
 
     public HiveMetadataFetcher(InputData md) {
         super(md);
 
         // init hive metastore client connection.
         client = HiveUtilities.initHiveClient();
+        jobConf = new JobConf(new Configuration());
     }
 
     /**
@@ -82,8 +98,28 @@ public class HiveMetadataFetcher extends MetadataFetcher {
             try {
                 Metadata metadata = new Metadata(tblDesc);
                 Table tbl = HiveUtilities.getHiveTable(client, tblDesc);
-                getSchema(tbl, metadata);
+                HiveUtilities.getSchema(tbl, metadata);
+                boolean hasComplexTypes = HiveUtilities.hasComplexTypes(metadata);
                 metadataList.add(metadata);
+                List<Partition> tablePartitions = client.listPartitionsByFilter(tblDesc.getPath(), tblDesc.getName(), "", (short) -1);
+                Set<OutputFormat> formats = new HashSet<OutputFormat>();
+                // If the table has partitions, collect the output formats of all partitions
+                for (Partition tablePartition : tablePartitions) {
+                    String inputFormat = tablePartition.getSd().getInputFormat();
+                    OutputFormat outputFormat = getOutputFormat(inputFormat, hasComplexTypes);
+                    formats.add(outputFormat);
+                }
+                // If the table has no partitions, use the single format of the table
+                if (tablePartitions.size() == 0 ) {
+                    String inputFormat = tbl.getSd().getInputFormat();
+                    OutputFormat outputFormat = getOutputFormat(inputFormat, hasComplexTypes);
+                    formats.add(outputFormat);
+                }
+                metadata.setOutputFormats(formats);
+                Map<String, String> outputParameters = new HashMap<String, String>();
+                Integer delimiterCode = HiveUtilities.getDelimiterCode(tbl.getSd());
+                outputParameters.put(DELIM_FIELD, delimiterCode.toString());
+                metadata.setOutputParameters(outputParameters);
             } catch (UnsupportedTypeException | UnsupportedOperationException e) {
                 if(ignoreErrors) {
                     LOG.warn("Metadata fetch for " + tblDesc.toString() + " failed. " + e.getMessage());
@@ -97,42 +133,13 @@ public class HiveMetadataFetcher extends MetadataFetcher {
         return metadataList;
     }
 
-
-    /**
-     * Populates the given metadata object with the given table's fields and partitions,
-     * The partition fields are added at the end of the table schema.
-     * Throws an exception if the table contains unsupported field types.
-     * Supported HCatalog types: TINYINT,
-     * SMALLINT, INT, BIGINT, BOOLEAN, FLOAT, DOUBLE, STRING, BINARY, TIMESTAMP,
-     * DATE, DECIMAL, VARCHAR, CHAR.
-     *
-     * @param tbl Hive table
-     * @param metadata schema of given table
-     */
-    private void getSchema(Table tbl, Metadata metadata) {
-
-        int hiveColumnsSize = tbl.getSd().getColsSize();
-        int hivePartitionsSize = tbl.getPartitionKeysSize();
-
-        if (LOG.isDebugEnabled()) {
-            LOG.debug("Hive table: " + hiveColumnsSize + " fields, " + hivePartitionsSize + " partitions.");
-        }
-
-        // check hive fields
-        try {
-            List<FieldSchema> hiveColumns = tbl.getSd().getCols();
-            for (FieldSchema hiveCol : hiveColumns) {
-                metadata.addField(HiveUtilities.mapHiveType(hiveCol));
-            }
-            // check partition fields
-            List<FieldSchema> hivePartitions = tbl.getPartitionKeys();
-            for (FieldSchema hivePart : hivePartitions) {
-                metadata.addField(HiveUtilities.mapHiveType(hivePart));
-            }
-        } catch (UnsupportedTypeException e) {
-            String errorMsg = "Failed to retrieve metadata for table " + metadata.getItem() + ". " +
-                    e.getMessage();
-            throw new UnsupportedTypeException(errorMsg);
-        }
+    private OutputFormat getOutputFormat(String inputFormat, boolean hasComplexTypes) throws Exception {
+        OutputFormat outputFormat = null;
+        InputFormat<?, ?> fformat = HiveDataFragmenter.makeInputFormat(inputFormat, jobConf);
+        String profile = ProfileFactory.get(fformat, hasComplexTypes);
+        String outputFormatClassName = ProfilesConf.getProfilePluginsMap(profile).get("X-GP-OUTPUTFORMAT");
+        outputFormat = OutputFormat.getOutputFormat(outputFormatClassName);
+        return outputFormat;
     }
+
 }

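Editorial note: with this change each table's metadata advertises every output format it can produce, one per distinct partition input format, or a single format derived from the table itself when there are no partitions, plus the field delimiter as an output parameter. A simplified sketch of the format-collection step; collectOutputFormats and toOutputFormat are stand-ins for the ProfileFactory/ProfilesConf lookup in the real code:

    // Hypothetical sketch of collecting the distinct output formats of a table.
    static java.util.Set<String> collectOutputFormats(
            java.util.List<String> partitionInputFormats, String tableInputFormat) {
        java.util.Set<String> formats = new java.util.HashSet<>();
        for (String inputFormat : partitionInputFormats) {
            formats.add(toOutputFormat(inputFormat));      // dedup across partitions
        }
        if (partitionInputFormats.isEmpty()) {
            formats.add(toOutputFormat(tableInputFormat)); // unpartitioned table
        }
        return formats;
    }

    static String toOutputFormat(String inputFormat) {
        // stand-in for: profile = ProfileFactory.get(format, hasComplexTypes);
        // then reading X-GP-OUTPUTFORMAT for that profile from ProfilesConf
        return inputFormat.contains("Text") ? "TEXT" : "GPDBWritable";
    }
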
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveORCAccessor.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveORCAccessor.java b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveORCAccessor.java
index dc195f4..07348b0 100644
--- a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveORCAccessor.java
+++ b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveORCAccessor.java
@@ -30,6 +30,7 @@ import org.apache.hawq.pxf.api.BasicFilter;
 import org.apache.hawq.pxf.api.LogicalFilter;
 import org.apache.hawq.pxf.api.utilities.ColumnDescriptor;
 import org.apache.hawq.pxf.api.utilities.InputData;
+import org.apache.hawq.pxf.plugins.hive.utilities.HiveUtilities;
 import org.apache.commons.lang.StringUtils;
 
 import java.sql.Date;
@@ -37,7 +38,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 
-import static org.apache.hawq.pxf.plugins.hive.HiveInputFormatFragmenter.PXF_HIVE_SERDES;
+import static org.apache.hawq.pxf.plugins.hive.utilities.HiveUtilities.PXF_HIVE_SERDES;
 
 /**
  * Specialization of HiveAccessor for a Hive table that stores only ORC files.
@@ -61,9 +62,9 @@ public class HiveORCAccessor extends HiveAccessor {
      */
     public HiveORCAccessor(InputData input) throws Exception {
         super(input, new OrcInputFormat());
-        String[] toks = HiveInputFormatFragmenter.parseToks(input, PXF_HIVE_SERDES.ORC_SERDE.name());
-        initPartitionFields(toks[HiveInputFormatFragmenter.TOK_KEYS]);
-        filterInFragmenter = new Boolean(toks[HiveInputFormatFragmenter.TOK_FILTER_DONE]);
+        HiveUserData hiveUserData = HiveUtilities.parseHiveUserData(input, PXF_HIVE_SERDES.ORC_SERDE);
+        initPartitionFields(hiveUserData.getPartitionKeys());
+        filterInFragmenter = hiveUserData.isFilterInFragmenter();
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveORCSerdeResolver.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveORCSerdeResolver.java b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveORCSerdeResolver.java
index 93aa474..fec0ff0 100644
--- a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveORCSerdeResolver.java
+++ b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveORCSerdeResolver.java
@@ -34,6 +34,7 @@ import org.apache.hawq.pxf.api.io.DataType;
 import org.apache.hawq.pxf.api.utilities.ColumnDescriptor;
 import org.apache.hawq.pxf.api.utilities.InputData;
 import org.apache.hawq.pxf.plugins.hive.utilities.HiveUtilities;
+import org.apache.hawq.pxf.plugins.hive.utilities.HiveUtilities.PXF_HIVE_SERDES;
 
 import java.util.*;
 
@@ -43,8 +44,7 @@ import java.util.*;
  */
 public class HiveORCSerdeResolver extends HiveResolver {
     private static final Log LOG = LogFactory.getLog(HiveORCSerdeResolver.class);
-    private OrcSerde deserializer;
-    private HiveInputFormatFragmenter.PXF_HIVE_SERDES serdeType;
+    private HiveUtilities.PXF_HIVE_SERDES serdeType;
     private String typesString;
 
     public HiveORCSerdeResolver(InputData input) throws Exception {
@@ -54,41 +54,16 @@ public class HiveORCSerdeResolver extends HiveResolver {
     /* read the data supplied by the fragmenter: inputformat name, serde name, partition keys */
     @Override
     void parseUserData(InputData input) throws Exception {
-        String[] toks = HiveInputFormatFragmenter.parseToks(input);
-        String serdeEnumStr = toks[HiveInputFormatFragmenter.TOK_SERDE];
-        if (serdeEnumStr.equals(HiveInputFormatFragmenter.PXF_HIVE_SERDES.ORC_SERDE.name())) {
-            serdeType = HiveInputFormatFragmenter.PXF_HIVE_SERDES.ORC_SERDE;
-        } else {
-            throw new UnsupportedTypeException("Unsupported Hive Serde: " + serdeEnumStr);
-        }
-        partitionKeys = toks[HiveInputFormatFragmenter.TOK_KEYS];
-        typesString = toks[HiveInputFormatFragmenter.TOK_COL_TYPES];
+        HiveUserData hiveUserData = HiveUtilities.parseHiveUserData(input, HiveUtilities.PXF_HIVE_SERDES.ORC_SERDE);
+        serdeType = PXF_HIVE_SERDES.getPxfHiveSerde(hiveUserData.getSerdeClassName());
+        partitionKeys = hiveUserData.getPartitionKeys();
+        typesString = hiveUserData.getColTypes();
         collectionDelim = input.getUserProperty("COLLECTION_DELIM") == null ? COLLECTION_DELIM
                 : input.getUserProperty("COLLECTION_DELIM");
         mapkeyDelim = input.getUserProperty("MAPKEY_DELIM") == null ? MAPKEY_DELIM
                 : input.getUserProperty("MAPKEY_DELIM");
     }
 
-    /**
-     * getFields returns a singleton list of OneField item.
-     * OneField item contains two fields: an integer representing the VARCHAR type and a Java
-     * Object representing the field value.
-     */
-    @Override
-    public List<OneField> getFields(OneRow onerow) throws Exception {
-
-        Object tuple = deserializer.deserialize((Writable) onerow.getData());
-        // Each Hive record is a Struct
-        StructObjectInspector soi = (StructObjectInspector) deserializer.getObjectInspector();
-        List<OneField> record = traverseStruct(tuple, soi, false);
-
-        //Add partition fields if any
-        record.addAll(getPartitionFields());
-
-        return record;
-
-    }
-
     /*
      * Get and init the deserializer for the records of this Hive data fragment.
      * Suppress Warnings added because deserializer.initialize is an abstract function that is deprecated
@@ -127,12 +102,7 @@ public class HiveORCSerdeResolver extends HiveResolver {
         serdeProperties.put(serdeConstants.LIST_COLUMNS, columnNames.toString());
         serdeProperties.put(serdeConstants.LIST_COLUMN_TYPES, columnTypes.toString());
 
-        if (serdeType == HiveInputFormatFragmenter.PXF_HIVE_SERDES.ORC_SERDE) {
-            deserializer = new OrcSerde();
-        } else {
-            throw new UnsupportedTypeException("Unsupported Hive Serde: " + serdeType.name()); /* we should not get here */
-        }
-
+        deserializer = HiveUtilities.createDeserializer(serdeType, HiveUtilities.PXF_HIVE_SERDES.ORC_SERDE);
         deserializer.initialize(new JobConf(new Configuration(), HiveORCSerdeResolver.class), serdeProperties);
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveRCFileAccessor.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveRCFileAccessor.java b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveRCFileAccessor.java
index 2686851..7132d7b 100644
--- a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveRCFileAccessor.java
+++ b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveRCFileAccessor.java
@@ -21,7 +21,7 @@ package org.apache.hawq.pxf.plugins.hive;
 
 
 import org.apache.hawq.pxf.api.utilities.InputData;
-
+import org.apache.hawq.pxf.plugins.hive.utilities.HiveUtilities;
 import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
 import org.apache.hadoop.hive.ql.io.RCFileRecordReader;
 import org.apache.hadoop.mapred.FileSplit;
@@ -30,7 +30,7 @@ import org.apache.hadoop.mapred.JobConf;
 
 import java.io.IOException;
 
-import static org.apache.hawq.pxf.plugins.hive.HiveInputFormatFragmenter.PXF_HIVE_SERDES;
+import static org.apache.hawq.pxf.plugins.hive.utilities.HiveUtilities.PXF_HIVE_SERDES;
 
 /**
  * Specialization of HiveAccessor for a Hive table that stores only RC files.
@@ -47,9 +47,9 @@ public class HiveRCFileAccessor extends HiveAccessor {
      */
     public HiveRCFileAccessor(InputData input) throws Exception {
         super(input, new RCFileInputFormat());
-        String[] toks = HiveInputFormatFragmenter.parseToks(input, PXF_HIVE_SERDES.COLUMNAR_SERDE.name(), PXF_HIVE_SERDES.LAZY_BINARY_COLUMNAR_SERDE.name());
-        initPartitionFields(toks[HiveInputFormatFragmenter.TOK_KEYS]);
-        filterInFragmenter = new Boolean(toks[HiveInputFormatFragmenter.TOK_FILTER_DONE]);
+        HiveUserData hiveUserData = HiveUtilities.parseHiveUserData(input, PXF_HIVE_SERDES.COLUMNAR_SERDE, PXF_HIVE_SERDES.LAZY_BINARY_COLUMNAR_SERDE);
+        initPartitionFields(hiveUserData.getPartitionKeys());
+        filterInFragmenter = hiveUserData.isFilterInFragmenter();
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveResolver.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveResolver.java b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveResolver.java
index 3837f78..5646969 100644
--- a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveResolver.java
+++ b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveResolver.java
@@ -27,6 +27,7 @@ import org.apache.hawq.pxf.api.utilities.InputData;
 import org.apache.hawq.pxf.api.utilities.Plugin;
 import org.apache.hawq.pxf.api.utilities.Utilities;
 import org.apache.hawq.pxf.plugins.hdfs.utilities.HdfsUtilities;
+import org.apache.hawq.pxf.plugins.hive.utilities.HiveUtilities;
 import org.apache.commons.lang.CharUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -74,10 +75,10 @@ public class HiveResolver extends Plugin implements ReadResolver {
     protected static final String COLLECTION_DELIM = ",";
     protected String collectionDelim;
     protected String mapkeyDelim;
-    private SerDe deserializer;
+    protected SerDe deserializer;
     private List<OneField> partitionFields;
-    private String serdeName;
-    private String propsString;
+    protected String serdeClassName;
+    protected String propsString;
     String partitionKeys;
     protected char delimiter;
     String nullChar = "\\N";
@@ -133,19 +134,11 @@ public class HiveResolver extends Plugin implements ReadResolver {
 
     /* Parses user data string (arrived from fragmenter). */
     void parseUserData(InputData input) throws Exception {
-        final int EXPECTED_NUM_OF_TOKS = 5;
+        HiveUserData hiveUserData = HiveUtilities.parseHiveUserData(input);
 
-        String userData = new String(input.getFragmentUserData());
-        String[] toks = userData.split(HiveDataFragmenter.HIVE_UD_DELIM);
-
-        if (toks.length != EXPECTED_NUM_OF_TOKS) {
-            throw new UserDataException("HiveResolver expected "
-                    + EXPECTED_NUM_OF_TOKS + " tokens, but got " + toks.length);
-        }
-
-        serdeName = toks[1];
-        propsString = toks[2];
-        partitionKeys = toks[3];
+        serdeClassName = hiveUserData.getSerdeClassName();
+        propsString = hiveUserData.getPropertiesString();
+        partitionKeys = hiveUserData.getPartitionKeys();
 
         collectionDelim = input.getUserProperty("COLLECTION_DELIM") == null ? COLLECTION_DELIM
                 : input.getUserProperty("COLLECTION_DELIM");
@@ -160,14 +153,16 @@ public class HiveResolver extends Plugin implements ReadResolver {
     void initSerde(InputData inputData) throws Exception {
         Properties serdeProperties;
 
-        Class<?> c = Class.forName(serdeName, true, JavaUtils.getClassLoader());
+        Class<?> c = Class.forName(serdeClassName, true, JavaUtils.getClassLoader());
         deserializer = (SerDe) c.newInstance();
         serdeProperties = new Properties();
-        ByteArrayInputStream inStream = new ByteArrayInputStream(
-                propsString.getBytes());
-        serdeProperties.load(inStream);
-        deserializer.initialize(new JobConf(conf, HiveResolver.class),
-                serdeProperties);
+        if (propsString != null ) {
+            ByteArrayInputStream inStream = new ByteArrayInputStream(propsString.getBytes());
+            serdeProperties.load(inStream);
+        } else {
+            throw new IllegalArgumentException("propsString is mandatory to initialize serde.");
+        }
+        deserializer.initialize(new JobConf(conf, HiveResolver.class), serdeProperties);
     }
 
     /*
@@ -271,7 +266,7 @@ public class HiveResolver extends Plugin implements ReadResolver {
      * The partition fields are initialized one time based on userData provided
      * by the fragmenter.
      */
-    void initPartitionFields(StringBuilder parts) {
+    void initTextPartitionFields(StringBuilder parts) {
         if (partitionKeys.equals(HiveDataFragmenter.HIVE_NO_PART_TBL)) {
             return;
         }
@@ -625,47 +620,49 @@ public class HiveResolver extends Plugin implements ReadResolver {
      */
     void parseDelimiterChar(InputData input) {
 
-        String userDelim = input.getUserProperty("DELIMITER");
+        String userDelim = input.getUserProperty(InputData.DELIMITER_KEY);
 
         if (userDelim == null) {
-            throw new IllegalArgumentException("DELIMITER is a required option");
-        }
-
-        final int VALID_LENGTH = 1;
-        final int VALID_LENGTH_HEX = 4;
-
-        if (userDelim.startsWith("\\x")) { // hexadecimal sequence
-
-            if (userDelim.length() != VALID_LENGTH_HEX) {
+            /* No DELIMITER in URL, try to get it from fragment's user data*/
+            HiveUserData hiveUserData = null;
+            try {
+                hiveUserData = HiveUtilities.parseHiveUserData(input);
+            } catch (UserDataException ude) {
+                throw new IllegalArgumentException("Couldn't parse user data to get " + InputData.DELIMITER_KEY);
+            }
+            if (hiveUserData.getDelimiter() == null) {
+                throw new IllegalArgumentException(InputData.DELIMITER_KEY + " is a required option");
+            }
+            delimiter = (char) Integer.valueOf(hiveUserData.getDelimiter()).intValue();
+        } else {
+            final int VALID_LENGTH = 1;
+            final int VALID_LENGTH_HEX = 4;
+            if (userDelim.startsWith("\\x")) { // hexadecimal sequence
+                if (userDelim.length() != VALID_LENGTH_HEX) {
+                    throw new IllegalArgumentException(
+                            "Invalid hexadecimal value for delimiter (got "
+                                    + userDelim + ")");
+                }
+                delimiter = (char) Integer.parseInt(
+                        userDelim.substring(2, VALID_LENGTH_HEX), 16);
+                if (!CharUtils.isAscii(delimiter)) {
+                    throw new IllegalArgumentException(
+                            "Invalid delimiter value. Must be a single ASCII character, or a hexadecimal sequence (got non ASCII "
+                                    + delimiter + ")");
+                }
+                return;
+            }
+            if (userDelim.length() != VALID_LENGTH) {
                 throw new IllegalArgumentException(
-                        "Invalid hexdecimal value for delimiter (got"
+                        "Invalid delimiter value. Must be a single ASCII character, or a hexadecimal sequence (got "
                                 + userDelim + ")");
             }
-
-            delimiter = (char) Integer.parseInt(
-                    userDelim.substring(2, VALID_LENGTH_HEX), 16);
-
-            if (!CharUtils.isAscii(delimiter)) {
+            if (!CharUtils.isAscii(userDelim.charAt(0))) {
                 throw new IllegalArgumentException(
                         "Invalid delimiter value. Must be a single ASCII character, or a hexadecimal sequence (got non ASCII "
-                                + delimiter + ")");
+                                + userDelim + ")");
             }
-
-            return;
-        }
-
-        if (userDelim.length() != VALID_LENGTH) {
-            throw new IllegalArgumentException(
-                    "Invalid delimiter value. Must be a single ASCII character, or a hexadecimal sequence (got "
-                            + userDelim + ")");
+            delimiter = userDelim.charAt(0);
         }
-
-        if (!CharUtils.isAscii(userDelim.charAt(0))) {
-            throw new IllegalArgumentException(
-                    "Invalid delimiter value. Must be a single ASCII character, or a hexadecimal sequence (got non ASCII "
-                            + userDelim + ")");
-        }
-
-        delimiter = userDelim.charAt(0);
     }
 }

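Editorial note: parseDelimiterChar above now has two sources for the delimiter: the DELIMITER option from the request URL when present, and otherwise the delimiter code carried in the fragment user data. The URL form accepts either a single ASCII character or a \xNN hexadecimal escape. A simplified, hypothetical version of just the URL parsing (parseDelimiter is an invented helper, not the real method):

    // Simplified sketch of the DELIMITER option parsing shown above:
    // either a single ASCII character or a "\xNN" hexadecimal escape.
    static char parseDelimiter(String userDelim) {
        if (userDelim.startsWith("\\x")) {                 // e.g. "\x09" for TAB
            if (userDelim.length() != 4) {
                throw new IllegalArgumentException("Invalid hexadecimal delimiter: " + userDelim);
            }
            char d = (char) Integer.parseInt(userDelim.substring(2), 16);
            if (d > 127) {
                throw new IllegalArgumentException("Delimiter must be ASCII, got: " + userDelim);
            }
            return d;
        }
        if (userDelim.length() != 1 || userDelim.charAt(0) > 127) {
            throw new IllegalArgumentException("Delimiter must be a single ASCII character, got: " + userDelim);
        }
        return userDelim.charAt(0);
    }
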
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveStringPassResolver.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveStringPassResolver.java b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveStringPassResolver.java
index fdc5f69..76d5cad 100644
--- a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveStringPassResolver.java
+++ b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveStringPassResolver.java
@@ -22,7 +22,11 @@ package org.apache.hawq.pxf.plugins.hive;
 
 import org.apache.hawq.pxf.api.OneField;
 import org.apache.hawq.pxf.api.OneRow;
+import org.apache.hawq.pxf.api.OutputFormat;
+import org.apache.hawq.pxf.api.UserDataException;
 import org.apache.hawq.pxf.api.utilities.InputData;
+import org.apache.hawq.pxf.plugins.hive.utilities.HiveUtilities;
+import org.apache.hawq.pxf.service.utilities.ProtocolData;
 
 import java.util.Collections;
 import java.util.List;
@@ -42,21 +46,32 @@ public class HiveStringPassResolver extends HiveResolver {
 
     @Override
     void parseUserData(InputData input) throws Exception {
-        String userData = new String(input.getFragmentUserData());
-        String[] toks = userData.split(HiveDataFragmenter.HIVE_UD_DELIM);
+        HiveUserData hiveUserData = HiveUtilities.parseHiveUserData(input);
         parseDelimiterChar(input);
         parts = new StringBuilder();
-        partitionKeys = toks[HiveInputFormatFragmenter.TOK_KEYS];
+        partitionKeys = hiveUserData.getPartitionKeys();
+        serdeClassName = hiveUserData.getSerdeClassName();
+
+        /* Needed only for GPDBWritable format */
+        if (((ProtocolData) inputData).outputFormat() == OutputFormat.GPDBWritable) {
+            propsString = hiveUserData.getPropertiesString();
+        }
     }
 
     @Override
-    void initSerde(InputData input) {
-        /* nothing to do here */
+    void initSerde(InputData input) throws Exception {
+        if (((ProtocolData) inputData).outputFormat() == OutputFormat.GPDBWritable) {
+            super.initSerde(input);
+        }
     }
 
     @Override
     void initPartitionFields() {
-        initPartitionFields(parts);
+        if (((ProtocolData) inputData).outputFormat() == OutputFormat.TEXT) {
+            initTextPartitionFields(parts);
+        } else {
+            super.initPartitionFields();
+        }
     }
 
     /**
@@ -66,9 +81,13 @@ public class HiveStringPassResolver extends HiveResolver {
      */
     @Override
     public List<OneField> getFields(OneRow onerow) throws Exception {
-        String line = (onerow.getData()).toString();
-
-        /* We follow Hive convention. Partition fields are always added at the end of the record */
-        return Collections.singletonList(new OneField(VARCHAR.getOID(), line + parts));
+        if (((ProtocolData) inputData).outputFormat() == OutputFormat.TEXT) {
+            String line = (onerow.getData()).toString();
+            /* We follow Hive convention. Partition fields are always added at the end of the record */
+            return Collections.singletonList(new OneField(VARCHAR.getOID(), line + parts));
+        } else {
+            return super.getFields(onerow);
+        }
     }
+
 }

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveUserData.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveUserData.java b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveUserData.java
new file mode 100644
index 0000000..e3632e0
--- /dev/null
+++ b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/HiveUserData.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hawq.pxf.plugins.hive;
+
+/**
+ * Class which is a carrier for user data in Hive fragment.
+ *
+ */
+public class HiveUserData {
+
+    public static final String HIVE_UD_DELIM = "!HUDD!";
+    private static final int EXPECTED_NUM_OF_TOKS = 7;
+
+    public HiveUserData(String inputFormatName, String serdeClassName,
+            String propertiesString, String partitionKeys,
+            boolean filterInFragmenter,
+            String delimiter,
+            String colTypes) {
+
+        this.inputFormatName = inputFormatName;
+        this.serdeClassName = serdeClassName;
+        this.propertiesString = propertiesString;
+        this.partitionKeys = partitionKeys;
+        this.filterInFragmenter = filterInFragmenter;
+        this.delimiter = (delimiter == null ? "0" : delimiter);
+        this.colTypes = colTypes;
+    }
+
+    /**
+     * Returns input format of a fragment
+     *
+     * @return input format of a fragment
+     */
+    public String getInputFormatName() {
+        return inputFormatName;
+    }
+
+    /**
+     * Returns SerDe class name
+     *
+     * @return SerDe class name
+     */
+    public String getSerdeClassName() {
+        return serdeClassName;
+    }
+
+    /**
+     * Returns properties string needed for SerDe initialization
+     *
+     * @return properties string needed for SerDe initialization
+     */
+    public String getPropertiesString() {
+        return propertiesString;
+    }
+
+    /**
+     * Returns partition keys
+     *
+     * @return partition keys
+     */
+    public String getPartitionKeys() {
+        return partitionKeys;
+    }
+
+    /**
+     * Returns whether filtering was done in fragmenter
+     *
+     * @return true if filtering was done in fragmenter
+     */
+    public boolean isFilterInFragmenter() {
+        return filterInFragmenter;
+    }
+
+    /**
+     * Returns field delimiter
+     *
+     * @return field delimiter
+     */
+    public String getDelimiter() {
+        return delimiter;
+    }
+
+    public void setDelimiter(String delimiter) {
+        this.delimiter = delimiter;
+    }
+
+    private String inputFormatName;
+    private String serdeClassName;
+    private String propertiesString;
+    private String partitionKeys;
+    private boolean filterInFragmenter;
+    private String delimiter;
+    private String colTypes;
+
+    /**
+     * Returns the expected number of tokens in raw user data.
+     *
+     * @return expected number of tokens in raw user data
+     */
+    public static int getNumOfTokens() {
+        return EXPECTED_NUM_OF_TOKS;
+    }
+
+    @Override
+    public String toString() {
+        return inputFormatName + HiveUserData.HIVE_UD_DELIM
+                + serdeClassName + HiveUserData.HIVE_UD_DELIM
+                + propertiesString + HiveUserData.HIVE_UD_DELIM
+                + partitionKeys + HiveUserData.HIVE_UD_DELIM
+                + filterInFragmenter + HiveUserData.HIVE_UD_DELIM
+                + delimiter + HiveUserData.HIVE_UD_DELIM
+                + colTypes;
+    }
+
+    public String getColTypes() {
+        return colTypes;
+    }
+}

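Editorial note: HiveUserData replaces the ad-hoc token arrays. The fragmenter writes the carrier out with toString(), and HiveUtilities.parseHiveUserData splits the raw string back on HIVE_UD_DELIM into exactly getNumOfTokens() tokens. A small snippet of that round trip, using only what the class above exposes (the sample values are made up):

    // Serialize on the fragmenter side ...
    HiveUserData out = new HiveUserData(
            "org.apache.hadoop.mapred.TextInputFormat",            // input format
            "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",  // serde class name
            "columns=a,b",                                          // serde properties string
            "!HNPT!",                                               // partition keys (none)
            true,                                                   // filter done in fragmenter
            "44",                                                   // delimiter code (',')
            "string:int");                                          // column types
    byte[] raw = out.toString().getBytes();

    // ... and split it back on the accessor/resolver side, as parseHiveUserData does.
    String[] toks = new String(raw).split(HiveUserData.HIVE_UD_DELIM, HiveUserData.getNumOfTokens());
    // toks.length == 7; toks[1] is the serde class name, toks[4] the filter flag
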
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/utilities/EnumHiveToHawqType.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/utilities/EnumHiveToHawqType.java b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/utilities/EnumHiveToHawqType.java
index d91e949..ea65a66 100644
--- a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/utilities/EnumHiveToHawqType.java
+++ b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/utilities/EnumHiveToHawqType.java
@@ -42,37 +42,48 @@ public enum EnumHiveToHawqType {
     FloatType("float", EnumHawqType.Float4Type),
     DoubleType("double", EnumHawqType.Float8Type),
     StringType("string", EnumHawqType.TextType),
-    BinaryType("binary", EnumHawqType.ByteaType),
+    BinaryType("binary", EnumHawqType.ByteaType, true),
     TimestampType("timestamp", EnumHawqType.TimestampType),
     DateType("date", EnumHawqType.DateType),
     DecimalType("decimal", EnumHawqType.NumericType, "[(,)]"),
     VarcharType("varchar", EnumHawqType.VarcharType, "[(,)]"),
     CharType("char", EnumHawqType.BpcharType, "[(,)]"),
-    ArrayType("array", EnumHawqType.TextType, "[<,>]"),
-    MapType("map", EnumHawqType.TextType, "[<,>]"),
-    StructType("struct", EnumHawqType.TextType, "[<,>]"),
-    UnionType("uniontype", EnumHawqType.TextType, "[<,>]");
+    ArrayType("array", EnumHawqType.TextType, "[<,>]", true),
+    MapType("map", EnumHawqType.TextType, "[<,>]", true),
+    StructType("struct", EnumHawqType.TextType, "[<,>]", true),
+    UnionType("uniontype", EnumHawqType.TextType, "[<,>]", true);
 
     private String typeName;
     private EnumHawqType hawqType;
     private String splitExpression;
     private byte size;
+    private boolean isComplexType;
 
     EnumHiveToHawqType(String typeName, EnumHawqType hawqType) {
         this.typeName = typeName;
         this.hawqType = hawqType;
     }
-    
+
     EnumHiveToHawqType(String typeName, EnumHawqType hawqType, byte size) {
         this(typeName, hawqType);
         this.size = size;
     }
 
+    EnumHiveToHawqType(String typeName, EnumHawqType hawqType, boolean isComplexType) {
+        this(typeName, hawqType);
+        this.isComplexType = isComplexType;
+    }
+
     EnumHiveToHawqType(String typeName, EnumHawqType hawqType, String splitExpression) {
         this(typeName, hawqType);
         this.splitExpression = splitExpression;
     }
 
+    EnumHiveToHawqType(String typeName, EnumHawqType hawqType, String splitExpression, boolean isComplexType) {
+        this(typeName, hawqType, splitExpression);
+        this.isComplexType = isComplexType;
+    }
+
     /**
      * 
      * @return name of type
@@ -216,4 +227,12 @@ public enum EnumHiveToHawqType {
         return size;
     }
 
+    public boolean isComplexType() {
+        return isComplexType;
+    }
+
+    public void setComplexType(boolean isComplexType) {
+        this.isComplexType = isComplexType;
+    }
+
 }
\ No newline at end of file

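Editorial note: the enum now flags binary, array, map, struct and uniontype as complex; downstream code (HiveUtilities.hasComplexTypes in this patch) uses that flag to decide whether optimized profiles can be used. A hypothetical, self-contained sketch of the same kind of check, operating directly on Hive type names instead of Metadata fields:

    // Hypothetical sketch: does any column use a Hive type flagged as complex above?
    static boolean hasComplexTypes(java.util.List<String> hiveTypeNames) {
        for (String hiveType : hiveTypeNames) {
            String baseType = hiveType.split("[<(]")[0];   // strip type parameters
            switch (baseType) {
                case "binary":
                case "array":
                case "map":
                case "struct":
                case "uniontype":
                    return true;                           // complex per EnumHiveToHawqType
                default:
                    break;                                 // primitive type
            }
        }
        return false;
    }
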
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/utilities/HiveUtilities.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/utilities/HiveUtilities.java b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/utilities/HiveUtilities.java
index f7ebf4d..37f4ac2 100644
--- a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/utilities/HiveUtilities.java
+++ b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/utilities/HiveUtilities.java
@@ -35,17 +35,28 @@ import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.hive.serde2.*;
 import org.apache.hawq.pxf.api.Fragmenter;
 import org.apache.hawq.pxf.api.Metadata;
+import org.apache.hawq.pxf.api.Metadata.Field;
 import org.apache.hawq.pxf.api.UnsupportedTypeException;
+import org.apache.hawq.pxf.api.UserDataException;
 import org.apache.hawq.pxf.api.utilities.EnumHawqType;
+import org.apache.hawq.pxf.api.utilities.InputData;
+import org.apache.hawq.pxf.api.utilities.Utilities;
 import org.apache.hawq.pxf.api.io.DataType;
+import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe;
 import org.apache.hawq.pxf.plugins.hive.HiveDataFragmenter;
 import org.apache.hawq.pxf.plugins.hive.HiveInputFormatFragmenter;
 import org.apache.hawq.pxf.plugins.hive.HiveTablePartition;
+import org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe;
+import org.apache.hadoop.hive.ql.io.orc.OrcSerde;
 import org.apache.hawq.pxf.plugins.hive.HiveInputFormatFragmenter.PXF_HIVE_INPUT_FORMATS;
-import org.apache.hawq.pxf.plugins.hive.HiveInputFormatFragmenter.PXF_HIVE_SERDES;
+import org.apache.hawq.pxf.plugins.hive.HiveUserData;
+import org.apache.hawq.pxf.plugins.hive.utilities.HiveUtilities.PXF_HIVE_SERDES;
 
 /**
  * Class containing helper functions connecting
@@ -53,6 +64,46 @@ import org.apache.hawq.pxf.plugins.hive.HiveInputFormatFragmenter.PXF_HIVE_SERDE
  */
 public class HiveUtilities {
 
+    /** Defines the Hive serializers (serde classes) currently supported in pxf */
+    public enum PXF_HIVE_SERDES {
+        COLUMNAR_SERDE("org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"),
+        LAZY_BINARY_COLUMNAR_SERDE("org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe"),
+        LAZY_SIMPLE_SERDE("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"),
+        ORC_SERDE("org.apache.hadoop.hive.ql.io.orc.OrcSerde");
+
+        private String serdeClassName;
+
+        PXF_HIVE_SERDES(String serdeClassName) {
+            this.serdeClassName = serdeClassName;
+        }
+
+        /**
+         * Looks up a serde by its serde class name.
+         *
+         * @param serdeClassName serde class name to look up
+         * @param allowedSerdes serdes which are allowed in the current context
+         * @return serde matching the given class name, restricted to the allowed serdes
+         * @throws UnsupportedTypeException if no serde matches the class name, or the matching serde is not allowed in the current context
+         */
+        public static PXF_HIVE_SERDES getPxfHiveSerde(String serdeClassName, PXF_HIVE_SERDES... allowedSerdes) {
+            for (PXF_HIVE_SERDES s : values()) {
+                if (s.getSerdeClassName().equals(serdeClassName)) {
+
+                    if (allowedSerdes.length > 0
+                            && !Arrays.asList(allowedSerdes).contains(s)) {
+                        throw new UnsupportedTypeException("Unsupported Hive Serde: " + serdeClassName);
+                    }
+                    return s;
+                }
+            }
+            throw new UnsupportedTypeException("Unable to find serde for class name: "+ serdeClassName);
+        }
+
+        public String getSerdeClassName() {
+            return serdeClassName;
+        }
+    }
+
     private static final Log LOG = LogFactory.getLog(HiveUtilities.class);
     private static final String WILDCARD = "*";
 
@@ -64,10 +115,7 @@ public class HiveUtilities {
     static final String STR_RC_FILE_INPUT_FORMAT = "org.apache.hadoop.hive.ql.io.RCFileInputFormat";
     static final String STR_TEXT_FILE_INPUT_FORMAT = "org.apache.hadoop.mapred.TextInputFormat";
     static final String STR_ORC_FILE_INPUT_FORMAT = "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat";
-    static final String STR_COLUMNAR_SERDE = "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe";
-    static final String STR_LAZY_BINARY_COLUMNAR_SERDE = "org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe";
-    static final String STR_LAZY_SIMPLE_SERDE = "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe";
-    static final String STR_ORC_SERDE = "org.apache.hadoop.hive.ql.io.orc.OrcSerde";
+    private static final int DEFAULT_DELIMITER_CODE = 44;
 
     /**
      * Initializes the HiveMetaStoreClient
@@ -162,7 +210,7 @@ public class HiveUtilities {
         } else
             hiveTypeName = hiveType;
 
-        return new Metadata.Field(fieldName, hawqType, hiveTypeName, modifiers);
+        return new Metadata.Field(fieldName, hawqType, hiveToHawqType.isComplexType(), hiveTypeName, modifiers);
     }
 
     /**
@@ -376,31 +424,6 @@ public class HiveUtilities {
         }
     }
 
-    /*
-     * Validates that partition serde corresponds to PXF supported serdes and
-     * transforms the class name to an enumeration for writing it to the
-     * resolvers on other PXF instances.
-     */
-    private static String assertSerde(String className, HiveTablePartition partData)
-            throws Exception {
-        switch (className) {
-            case STR_COLUMNAR_SERDE:
-                return PXF_HIVE_SERDES.COLUMNAR_SERDE.name();
-            case STR_LAZY_BINARY_COLUMNAR_SERDE:
-                return PXF_HIVE_SERDES.LAZY_BINARY_COLUMNAR_SERDE.name();
-            case STR_LAZY_SIMPLE_SERDE:
-                return PXF_HIVE_SERDES.LAZY_SIMPLE_SERDE.name();
-            case STR_ORC_SERDE:
-                return PXF_HIVE_SERDES.ORC_SERDE.name();
-            default:
-                throw new UnsupportedTypeException(
-                        "HiveInputFormatFragmenter does not yet support  "
-                                + className + " for " + partData
-                                + ". Supported serializers are: "
-                                + Arrays.toString(PXF_HIVE_SERDES.values()));
-        }
-    }
-
 
     /* Turns the partition keys into a string */
     public static String serializePartitionKeys(HiveTablePartition partData) throws Exception {
@@ -429,10 +452,19 @@ public class HiveUtilities {
         return partitionKeys.toString();
     }
 
+    /**
+     * Serializes the fragment-related attributes needed for reading and resolution into a string.
+     *
+     * @param fragmenterClassName fragmenter class name
+     * @param partData table partition data
+     * @param filterInFragmenter whether filtering was done in the fragmenter
+     * @return serialized representation of fragment-related attributes
+     * @throws Exception when the fragmenter class cannot be loaded or the attributes cannot be serialized
+     */
     @SuppressWarnings("unchecked")
     public static byte[] makeUserData(String fragmenterClassName, HiveTablePartition partData, boolean filterInFragmenter) throws Exception {
 
-        String userData = null;
+        HiveUserData hiveUserData = null;
 
         if (fragmenterClassName == null) {
             throw new IllegalArgumentException("No fragmenter provided.");
@@ -440,25 +472,158 @@ public class HiveUtilities {
 
         Class fragmenterClass = Class.forName(fragmenterClassName);
 
+        String inputFormatName = partData.storageDesc.getInputFormat();
+        String serdeClassName = partData.storageDesc.getSerdeInfo().getSerializationLib();
+        String propertiesString = serializeProperties(partData.properties);
+        String partitionKeys = serializePartitionKeys(partData);
+        String delimiter = getDelimiterCode(partData.storageDesc).toString();
+        String colTypes = partData.properties.getProperty("columns.types");
+
         if (HiveInputFormatFragmenter.class.isAssignableFrom(fragmenterClass)) {
-            String inputFormatName = partData.storageDesc.getInputFormat();
-            String serdeName = partData.storageDesc.getSerdeInfo().getSerializationLib();
-            String partitionKeys = serializePartitionKeys(partData);
-            String colTypes = partData.properties.getProperty("columns.types");
             assertFileType(inputFormatName, partData);
-            userData = assertSerde(serdeName, partData) + HiveDataFragmenter.HIVE_UD_DELIM
-                    + partitionKeys + HiveDataFragmenter.HIVE_UD_DELIM + filterInFragmenter + HiveDataFragmenter.HIVE_UD_DELIM + colTypes;
-        } else if (HiveDataFragmenter.class.isAssignableFrom(fragmenterClass)){
-            String inputFormatName = partData.storageDesc.getInputFormat();
-            String serdeName = partData.storageDesc.getSerdeInfo().getSerializationLib();
-            String propertiesString = serializeProperties(partData.properties);
-            String partitionKeys = serializePartitionKeys(partData);
-            userData = inputFormatName + HiveDataFragmenter.HIVE_UD_DELIM + serdeName
-                    + HiveDataFragmenter.HIVE_UD_DELIM + propertiesString + HiveDataFragmenter.HIVE_UD_DELIM
-                    + partitionKeys + HiveDataFragmenter.HIVE_UD_DELIM + filterInFragmenter;
-        } else {
-            throw new IllegalArgumentException("HiveUtilities#makeUserData is not implemented for " + fragmenterClassName);
         }
-        return userData.getBytes();
+
+        hiveUserData = new HiveUserData(inputFormatName, serdeClassName, propertiesString, partitionKeys, filterInFragmenter, delimiter, colTypes);
+
+        return hiveUserData.toString().getBytes();
+    }
+
+    /**
+     * Parses raw user data into a HiveUserData instance
+     *
+     * @param input input data
+     * @param supportedSerdes list of allowed serdes in current context
+     * @return instance of HiveUserData class
+     * @throws UserDataException
+     */
+    public static HiveUserData parseHiveUserData(InputData input, PXF_HIVE_SERDES... supportedSerdes) throws UserDataException{
+        String userData = new String(input.getFragmentUserData());
+        String[] toks = userData.split(HiveUserData.HIVE_UD_DELIM, HiveUserData.getNumOfTokens());
+
+        if (toks.length != (HiveUserData.getNumOfTokens())) {
+            throw new UserDataException("HiveInputFormatFragmenter expected "
+                    + HiveUserData.getNumOfTokens() + " tokens, but got " + toks.length);
+        }
+
+        HiveUserData hiveUserData = new HiveUserData(toks[0], toks[1], toks[2], toks[3], Boolean.valueOf(toks[4]), toks[5], toks[6]);
+
+        if (supportedSerdes.length > 0) {
+            /* Make sure this serde is supported */
+            PXF_HIVE_SERDES pxfHiveSerde = PXF_HIVE_SERDES.getPxfHiveSerde(hiveUserData.getSerdeClassName(), supportedSerdes);
+        }
+
+        return hiveUserData;
+    }
+
+    private static String getSerdeParameter(StorageDescriptor sd, String parameterKey) {
+        String parameterValue = null;
+        if (sd != null && sd.getSerdeInfo() != null && sd.getSerdeInfo().getParameters() != null && sd.getSerdeInfo().getParameters().get(parameterKey) != null) {
+            parameterValue = sd.getSerdeInfo().getParameters().get(parameterKey);
+        }
+
+        return parameterValue;
+    }
+
+    /**
+     * Extracts the field delimiter from the storage descriptor.
+     * When the delimiter cannot be extracted from the storage descriptor, the default value is used
+     *
+     * @param sd StorageDescriptor of table/partition
+     * @return ASCII code of delimiter
+     */
+    public static Integer getDelimiterCode(StorageDescriptor sd) {
+        Integer delimiterCode = null;
+
+        String delimiter = getSerdeParameter(sd, serdeConstants.FIELD_DELIM);
+        if (delimiter != null) {
+            delimiterCode = (int) delimiter.charAt(0);
+            return delimiterCode;
+        }
+
+        delimiter = getSerdeParameter(sd, serdeConstants.SERIALIZATION_FORMAT);
+        if (delimiter != null) {
+            delimiterCode = Integer.parseInt(delimiter);
+            return delimiterCode;
+        }
+
+        return DEFAULT_DELIMITER_CODE;
+    }
+
+    /**
+     * Determines whether the metadata definition has any complex type
+     * @see EnumHiveToHawqType for complex type attribute definition
+     *
+     * @param metadata metadata of relation
+     * @return true if metadata has at least one field of complex type
+     */
+    public static boolean hasComplexTypes(Metadata metadata) {
+        boolean hasComplexTypes = false;
+        List<Field> fields = metadata.getFields();
+        for (Field field: fields) {
+            if (field.isComplexType()) {
+                hasComplexTypes = true;
+                break;
+            }
+        }
+
+        return hasComplexTypes;
+    }
+
+    /**
+     * Populates the given metadata object with the given table's fields and partitions.
+     * The partition fields are added at the end of the table schema.
+     * Throws an exception if the table contains unsupported field types.
+     * Supported HCatalog types: TINYINT,
+     * SMALLINT, INT, BIGINT, BOOLEAN, FLOAT, DOUBLE, STRING, BINARY, TIMESTAMP,
+     * DATE, DECIMAL, VARCHAR, CHAR.
+     *
+     * @param tbl Hive table
+     * @param metadata schema of given table
+     */
+    public static void getSchema(Table tbl, Metadata metadata) {
+
+        int hiveColumnsSize = tbl.getSd().getColsSize();
+        int hivePartitionsSize = tbl.getPartitionKeysSize();
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Hive table: " + hiveColumnsSize + " fields, " + hivePartitionsSize + " partitions.");
+        }
+
+        // check hive fields
+        try {
+            List<FieldSchema> hiveColumns = tbl.getSd().getCols();
+            for (FieldSchema hiveCol : hiveColumns) {
+                metadata.addField(HiveUtilities.mapHiveType(hiveCol));
+            }
+            // check partition fields
+            List<FieldSchema> hivePartitions = tbl.getPartitionKeys();
+            for (FieldSchema hivePart : hivePartitions) {
+                metadata.addField(HiveUtilities.mapHiveType(hivePart));
+            }
+        } catch (UnsupportedTypeException e) {
+            String errorMsg = "Failed to retrieve metadata for table " + metadata.getItem() + ". " +
+                    e.getMessage();
+            throw new UnsupportedTypeException(errorMsg);
+        }
+    }
+
+    /**
+     * Creates an instance of a given serde type
+     *
+     * @param serdeType
+     * @param allowedSerdes
+     * @return instance of a given serde
+     * @throws UnsupportedTypeException if given serde is not allowed in current context
+     */
+    @SuppressWarnings("deprecation")
+    public static SerDe createDeserializer(PXF_HIVE_SERDES serdeType, PXF_HIVE_SERDES... allowedSerdes) throws Exception{
+        SerDe deserializer = null;
+        if (!Arrays.asList(allowedSerdes).contains(serdeType)) {
+            throw new UnsupportedTypeException("Unsupported Hive Serde: " + serdeType.name());
+        }
+
+        deserializer = (SerDe) Utilities.createAnyInstance(serdeType.getSerdeClassName());
+
+        return deserializer;
     }
 }
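
The makeUserData/parseHiveUserData pair above forms the round trip between the
fragmenter and the accessor/resolver side. A minimal usage sketch, assuming
partData and inputData are supplied by the surrounding PXF machinery and with
exception handling omitted (HiveUserData itself lives elsewhere in the pxf-hive
plugin):

    byte[] raw = HiveUtilities.makeUserData(
            HiveDataFragmenter.class.getName(), partData, true);
    // the bytes travel to the segments inside the fragment's user data
    HiveUserData parsed = HiveUtilities.parseHiveUserData(inputData);
    String serdeClass = parsed.getSerdeClassName();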

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/utilities/ProfileFactory.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/utilities/ProfileFactory.java b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/utilities/ProfileFactory.java
new file mode 100644
index 0000000..f36f074
--- /dev/null
+++ b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/utilities/ProfileFactory.java
@@ -0,0 +1,61 @@
+package org.apache.hawq.pxf.plugins.hive.utilities;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
+import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
+import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.TextInputFormat;
+import org.apache.hawq.pxf.api.Metadata;
+
+/**
+ * Factory class which returns the optimal profile for a given input format
+ *
+ */
+public class ProfileFactory {
+
+    private static final String HIVE_TEXT_PROFILE = "HiveText";
+    private static final String HIVE_RC_PROFILE = "HiveRC";
+    private static final String HIVE_ORC_PROFILE = "HiveORC";
+    private static final String HIVE_PROFILE = "Hive";
+
+    /**
+     * Returns the name of the optimal profile
+     *
+     * @param inputFormat input format of table/partition
+     * @param hasComplexTypes whether record has complex types, see @EnumHiveToHawqType
+     * @return name of optimal profile
+     */
+    public static String get(InputFormat inputFormat, boolean hasComplexTypes) {
+        String profileName = null;
+        if (inputFormat instanceof TextInputFormat && !hasComplexTypes) {
+            profileName = HIVE_TEXT_PROFILE;
+        } else if (inputFormat instanceof RCFileInputFormat) {
+            profileName = HIVE_RC_PROFILE;
+        } else if (inputFormat instanceof OrcInputFormat) {
+            profileName = HIVE_ORC_PROFILE;
+        } else {
+            //Default case
+            profileName = HIVE_PROFILE;
+        }
+        return profileName;
+    }
+
+}
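
A minimal sketch of how a caller might use the factory, assuming the Hadoop
input-format classes imported above (the variable names are illustrative):

    InputFormat format = new TextInputFormat();
    String textProfile = ProfileFactory.get(format, false); // "HiveText"
    String generic = ProfileFactory.get(format, true);      // "Hive": complex types rule out the text profile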

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-hive/src/test/java/org/apache/hawq/pxf/plugins/hive/HiveMetadataFetcherTest.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-hive/src/test/java/org/apache/hawq/pxf/plugins/hive/HiveMetadataFetcherTest.java b/pxf/pxf-hive/src/test/java/org/apache/hawq/pxf/plugins/hive/HiveMetadataFetcherTest.java
index d9d97fc..6e40f9a 100644
--- a/pxf/pxf-hive/src/test/java/org/apache/hawq/pxf/plugins/hive/HiveMetadataFetcherTest.java
+++ b/pxf/pxf-hive/src/test/java/org/apache/hawq/pxf/plugins/hive/HiveMetadataFetcherTest.java
@@ -132,6 +132,7 @@ public class HiveMetadataFetcherTest {
         fields.add(new FieldSchema("field2", "int", null));
         StorageDescriptor sd = new StorageDescriptor();
         sd.setCols(fields);
+        sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
         Table hiveTable = new Table();
         hiveTable.setTableType("MANAGED_TABLE");
         hiveTable.setSd(sd);
@@ -176,6 +177,7 @@ public class HiveMetadataFetcherTest {
         fields.add(new FieldSchema("field2", "int", null));
         StorageDescriptor sd = new StorageDescriptor();
         sd.setCols(fields);
+        sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
 
         // Mock hive tables returned from hive client
         for(int index=1;index<=2;index++) {
@@ -235,6 +237,7 @@ public class HiveMetadataFetcherTest {
         fields.add(new FieldSchema("field2", "int", null));
         StorageDescriptor sd = new StorageDescriptor();
         sd.setCols(fields);
+        sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
         Table hiveTable2 = new Table();
         hiveTable2.setTableType("MANAGED_TABLE");
         hiveTable2.setSd(sd);

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/6fa1ced2/pxf/pxf-hive/src/test/java/org/apache/hawq/pxf/plugins/hive/HiveORCAccessorTest.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-hive/src/test/java/org/apache/hawq/pxf/plugins/hive/HiveORCAccessorTest.java b/pxf/pxf-hive/src/test/java/org/apache/hawq/pxf/plugins/hive/HiveORCAccessorTest.java
index 7bbe811..8b4bf13 100644
--- a/pxf/pxf-hive/src/test/java/org/apache/hawq/pxf/plugins/hive/HiveORCAccessorTest.java
+++ b/pxf/pxf-hive/src/test/java/org/apache/hawq/pxf/plugins/hive/HiveORCAccessorTest.java
@@ -26,6 +26,8 @@ import org.apache.hadoop.mapred.*;
 import org.apache.hawq.pxf.api.utilities.ColumnDescriptor;
 import org.apache.hawq.pxf.api.utilities.InputData;
 import org.apache.hawq.pxf.plugins.hdfs.utilities.HdfsUtilities;
+import org.apache.hawq.pxf.plugins.hive.utilities.HiveUtilities;
+import org.apache.hawq.pxf.plugins.hive.utilities.HiveUtilities.PXF_HIVE_SERDES;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -43,7 +45,7 @@ import static org.mockito.Mockito.when;
 
 
 @RunWith(PowerMockRunner.class)
-@PrepareForTest({HiveORCAccessor.class, HiveInputFormatFragmenter.class, HdfsUtilities.class, HiveDataFragmenter.class})
+@PrepareForTest({HiveORCAccessor.class, HiveUtilities.class, HdfsUtilities.class, HiveDataFragmenter.class})
 @SuppressStaticInitializationFor({"org.apache.hadoop.mapred.JobConf",
         "org.apache.hadoop.hive.metastore.api.MetaException",
         "org.apache.hawq.pxf.plugins.hive.utilities.HiveUtilities"}) // Prevents static inits
@@ -61,8 +63,9 @@ public class HiveORCAccessorTest {
         jobConf = new JobConf();
         PowerMockito.whenNew(JobConf.class).withAnyArguments().thenReturn(jobConf);
 
-        PowerMockito.mockStatic(HiveInputFormatFragmenter.class);
-        PowerMockito.when(HiveInputFormatFragmenter.parseToks(any(InputData.class), any(String[].class))).thenReturn(new String[]{"", HiveDataFragmenter.HIVE_NO_PART_TBL, "true"});
+        PowerMockito.mockStatic(HiveUtilities.class);
+        PowerMockito.when(HiveUtilities.parseHiveUserData(any(InputData.class), any(PXF_HIVE_SERDES[].class))).thenReturn(new HiveUserData("", "", null, HiveDataFragmenter.HIVE_NO_PART_TBL, true, "1", ""));
+
         PowerMockito.mockStatic(HdfsUtilities.class);
 
         PowerMockito.mockStatic(HiveDataFragmenter.class);



[14/50] [abbrv] incubator-hawq git commit: HAWQ-1257. Prompt all tables which user doesn't have right once

Posted by es...@apache.org.
HAWQ-1257. Prompt all tables which user doesn't have right once


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/bf4742cb
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/bf4742cb
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/bf4742cb

Branch: refs/heads/2.1.0.0-incubating
Commit: bf4742cb6c4cd71c9174f030c54e28b8c2595942
Parents: 3b15739
Author: interma <in...@outlook.com>
Authored: Wed Jan 11 14:51:21 2017 +0800
Committer: Wen Lin <wl...@pivotal.io>
Committed: Thu Jan 12 17:45:20 2017 +0800

----------------------------------------------------------------------
 src/backend/catalog/aclchk.c        |  60 +++++++++---
 src/backend/libpq/rangerrest.c      | 139 ++++++++++++++++++++------
 src/backend/parser/parse_relation.c | 163 ++++++++++++++++++-------------
 src/include/utils/rangerrest.h      |  13 ++-
 4 files changed, 255 insertions(+), 120 deletions(-)
----------------------------------------------------------------------
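
With this change the Ranger ACL check collects every relation that fails the
permission test and reports them together, so a statement that touches several
protected tables should now fail with a single error of the form
"permission denied for relation(s): t1, t2" (the table names are illustrative)
instead of erroring out on the first relation it meets.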


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bf4742cb/src/backend/catalog/aclchk.c
----------------------------------------------------------------------
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index 01a4f94..73de11b 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -2710,17 +2710,23 @@ List *pg_rangercheck_batch(List *arg_list)
   List *aclresults = NIL;
   List *requestargs = NIL;
   ListCell *arg;
+  elog(LOG, "rangeracl batch check, acl list length:%d\n", arg_list->length);
   foreach(arg, arg_list) {
     RangerPrivilegeArgs *arg_ptr = (RangerPrivilegeArgs *) lfirst(arg);
+
     AclObjectKind objkind = arg_ptr->objkind;
     Oid object_oid = arg_ptr->object_oid;
     char *objectname = getNameFromOid(objkind, object_oid);
     char *rolename = getRoleName(arg_ptr->roleid);
     List* actions = getActionName(arg_ptr->mask);
     bool isAll = (arg_ptr->how == ACLMASK_ALL) ? true: false;
+
     RangerPrivilegeResults *aclresult = (RangerPrivilegeResults *) palloc(sizeof(RangerPrivilegeResults));
-    aclresult->result = -1;
+    aclresult->result = RANGERCHECK_NO_PRIV;
     aclresult->relOid = object_oid;
+    // these two sign fields will be set in create_ranger_request_json()
+    aclresult->resource_sign = 0;
+    aclresult->privilege_sign = 0;
     aclresults = lappend(aclresults, aclresult);
 
     RangerRequestJsonArgs *requestarg = (RangerRequestJsonArgs *) palloc(sizeof(RangerRequestJsonArgs));
@@ -2733,14 +2739,15 @@ List *pg_rangercheck_batch(List *arg_list)
 
   } // foreach
 
-  RangerACLResult ret = check_privilege_from_ranger(requestargs);
-
-  ListCell *result;
-  int k = 0;
-  foreach(result, aclresults) {
-    RangerPrivilegeResults *result_ptr = (RangerPrivilegeResults *) lfirst(result);
-    result_ptr->result = ret;
-    ++k;
+  int ret = check_privilege_from_ranger(requestargs, aclresults);
+  if (ret < 0)
+  {
+	  elog(WARNING, "ranger service unavailable or unexpected error\n");
+	  ListCell *result;
+	  foreach(result, aclresults) {
+		  RangerPrivilegeResults *result_ptr = (RangerPrivilegeResults *) lfirst(result);
+		  result_ptr->result = RANGERCHECK_NO_PRIV;
+	  }
   }
 
   if(requestargs) {
@@ -2760,10 +2767,6 @@ List *pg_rangercheck_batch(List *arg_list)
     requestargs = NULL;
   }
 
-  if(ret != RANGERCHECK_OK){
-    elog(ERROR, "ACL check failed\n");
-  }
-  elog(LOG, "oids%d\n", arg_list->length);
   return aclresults;
 }
 
@@ -2777,6 +2780,16 @@ pg_rangercheck(AclObjectKind objkind, Oid object_oid, Oid roleid,
 	bool isAll = (how == ACLMASK_ALL) ? true: false;
 
 	elog(LOG, "rangeraclcheck kind:%d,objectname:%s,role:%s,mask:%u\n",objkind,objectname,rolename,mask);
+
+	List *resultargs = NIL;
+    RangerPrivilegeResults *aclresult = (RangerPrivilegeResults *) palloc(sizeof(RangerPrivilegeResults));
+    aclresult->result = RANGERCHECK_NO_PRIV;
+    aclresult->relOid = object_oid;
+	// these two sign fields will be set in create_ranger_request_json()
+	aclresult->resource_sign = 0;
+	aclresult->privilege_sign = 0;
+    resultargs = lappend(resultargs, aclresult);
+
 	List *requestargs = NIL;
 	RangerRequestJsonArgs *requestarg = (RangerRequestJsonArgs *) palloc(sizeof(RangerRequestJsonArgs));
 	requestarg->user = rolename;
@@ -2785,8 +2798,25 @@ pg_rangercheck(AclObjectKind objkind, Oid object_oid, Oid roleid,
 	requestarg->actions = actions;
 	requestarg->isAll = isAll;
 	requestargs = lappend(requestargs, requestarg);
-	int ret = check_privilege_from_ranger(requestargs);
 
+	AclResult result = ACLCHECK_NO_PRIV;	
+	int ret = check_privilege_from_ranger(requestargs, resultargs);
+	if (ret == 0) 
+	{
+		ListCell *arg;
+		foreach(arg, resultargs) {
+			// only one element
+			RangerPrivilegeResults *arg_ptr = (RangerPrivilegeResults *) lfirst(arg);
+			if (arg_ptr->result == RANGERCHECK_OK)
+				result = ACLCHECK_OK;
+			break;
+		}
+	}
+
+	if (resultargs)
+	{
+		list_free_deep(resultargs);
+	}
 	if (requestargs)
 	{
 		ListCell *cell = list_head(requestargs);
@@ -2802,7 +2832,7 @@ pg_rangercheck(AclObjectKind objkind, Oid object_oid, Oid roleid,
 		list_free_deep(requestargs);
 		requestargs = NULL;
 	}
-	return ret;
+	return result;
 }
 
 /*

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bf4742cb/src/backend/libpq/rangerrest.c
----------------------------------------------------------------------
diff --git a/src/backend/libpq/rangerrest.c b/src/backend/libpq/rangerrest.c
index 5406251..74777dc 100644
--- a/src/backend/libpq/rangerrest.c
+++ b/src/backend/libpq/rangerrest.c
@@ -25,6 +25,7 @@
  *-------------------------------------------------------------------------
  */
 #include "utils/rangerrest.h"
+#include "utils/hsearch.h"
 /*
  * A mapping from AclObjectKind to string
  */
@@ -68,12 +69,16 @@ static void getClientIP(char *remote_host)
 	}
 }
 
-RangerACLResult parse_ranger_response(char* buffer)
+/*
+ * parse ranger response
+ * @param	buffer	ranger response 	
+ * @param	result_list		List of RangerPrivilegeResults
+ * @return	0 parse success; -1 other error
+ */
+static int parse_ranger_response(char* buffer, List *result_list)
 {
 	if (buffer == NULL || strlen(buffer) == 0)
-	{
-		return RANGERCHECK_UNKNOWN;
-	}
+		return -1;
 
 	elog(DEBUG3, "parse ranger restful response content : %s", buffer);
 
@@ -81,40 +86,84 @@ RangerACLResult parse_ranger_response(char* buffer)
 	if (response == NULL) 
 	{
 		elog(WARNING, "json_tokener_parse failed");
-		return RANGERCHECK_NO_PRIV;
+		return -1;
 	}
 
 	struct json_object *accessObj = NULL;
 	if (!json_object_object_get_ex(response, "access", &accessObj))
 	{
 		elog(WARNING, "get json access field failed");
-		return RANGERCHECK_NO_PRIV;
+		return -1;
 	}
 
 	int arraylen = json_object_array_length(accessObj);
 	elog(DEBUG3, "parse ranger response result array length: %d",arraylen);
-
-	// here should return which table's acl check failed in future.
 	for (int i=0; i< arraylen; i++){
 		struct json_object *jvalue = NULL;
 		struct json_object *jallow = NULL;
+		struct json_object *jresource = NULL;
+		struct json_object *jprivilege = NULL;
 
 		jvalue = json_object_array_get_idx(accessObj, i);
+		if (jvalue == NULL) 
+			return -1;
 		if (!json_object_object_get_ex(jvalue, "allowed", &jallow))
-		{
-			return RANGERCHECK_NO_PRIV;
-		}
-		json_bool result = json_object_get_boolean(jallow);
-		if(result != 1){
-			return RANGERCHECK_NO_PRIV;
+			return -1;
+		if (!json_object_object_get_ex(jvalue, "resource", &jresource))
+			return -1;
+		if (!json_object_object_get_ex(jvalue, "privileges", &jprivilege))
+			return -1;
+		
+		json_bool ok = json_object_get_boolean(jallow);
+
+		const char *resource_str = json_object_get_string(jresource);
+		const char *privilege_str = json_object_get_string(jprivilege);
+		uint32 resource_sign = string_hash(resource_str, strlen(resource_str));
+		uint32 privilege_sign = string_hash(privilege_str, strlen(privilege_str));
+		elog(DEBUG3, "ranger response access sign, resource_str:%s, privilege_str:%s", 
+			resource_str, privilege_str);
+
+		ListCell *result;
+		/* get each resource result by use sign */
+		foreach(result, result_list) {
+			/* a linear scan is enough for performance */
+			RangerPrivilegeResults *result_ptr = (RangerPrivilegeResults *) lfirst(result);
+			/* if there is only one access entry in the response, no need to check its sign */
+			if (arraylen > 1 &&  
+				(result_ptr->resource_sign != resource_sign || result_ptr->privilege_sign != privilege_sign) )
+				continue;
+
+			if (ok == 1)
+				result_ptr->result = RANGERCHECK_OK;
+			else 
+				result_ptr->result = RANGERCHECK_NO_PRIV;
 		}
 	}
-	return RANGERCHECK_OK;
+	return 0;
+}
+
+/**
+ * convert a string to lower
+ */ 
+static void str_tolower(char *dest, const char *src)
+{
+	Assert(src != NULL);
+	Assert(dest != NULL);
+	int len = strlen(src);
+	for (int i = 0; i < len; i++)
+	{
+		unsigned char ch = (unsigned char) src[i];
 
+		if (ch >= 'A' && ch <= 'Z')
+			ch += 'a' - 'A';
+		*(dest+i) = ch;
+	}	
+	dest[len] = '\0';
 }
+
 /**
  * Create a JSON object for Ranger request given some parameters.
- *
+ * example:
  *   {
  *     "requestId": 1,
  *     "user": "joe",
@@ -141,10 +190,12 @@ RangerACLResult parse_ranger_response(char* buffer)
  *         }
  *       ]
  *   }
- *
- *   args: List of RangerRequestJsonArgs
+ * 
+ * @param	request_list	List of RangerRequestJsonArgs
+ * @param	result_list		List of RangerPrivilegeResults
+ * @return	the parsed json object
  */
-json_object *create_ranger_request_json(List *args)
+static json_object *create_ranger_request_json(List *request_list, List *result_list)
 {
 	json_object *jrequest = json_object_new_object();
 	json_object *juser = NULL;
@@ -152,7 +203,8 @@ json_object *create_ranger_request_json(List *args)
 	char *user = NULL;
 	ListCell *arg;
 
-	foreach(arg, args)
+	int j = 0;
+	foreach(arg, request_list)
 	{
 		RangerRequestJsonArgs *arg_ptr = (RangerRequestJsonArgs *) lfirst(arg);
 		if (user == NULL)
@@ -162,7 +214,7 @@ json_object *create_ranger_request_json(List *args)
 		}
 		AclObjectKind kind = arg_ptr->kind;
 		char* object = arg_ptr->object;
-		Assert(user != NULL && object != NULL && privilege != NULL && arg_ptr->isAll);
+		Assert(user != NULL && object != NULL);
 		elog(DEBUG3, "build json for ranger restful request, user:%s, kind:%s, object:%s",
 				user, AclObjectKindStr[kind], object);
 
@@ -249,12 +301,27 @@ json_object *create_ranger_request_json(List *args)
 		ListCell *cell;
 		foreach(cell, arg_ptr->actions)
 		{
-		    json_object* jaction = json_object_new_string((char *)cell->data.ptr_value);
+			/* need more normalization in future */
+			char lower_action[32];
+			str_tolower(lower_action, (char *)cell->data.ptr_value);
+			lower_action[sizeof(lower_action)-1] = '\0';
+
+		    json_object* jaction = json_object_new_string(lower_action);
 		    json_object_array_add(jactions, jaction);
 		}
 		json_object_object_add(jelement, "privileges", jactions);
 		json_object_array_add(jaccess, jelement);
-
+		
+		/* set access sign */  
+		RangerPrivilegeResults *result_ptr = (RangerPrivilegeResults *)list_nth(result_list, j);			
+		const char *resource_str = json_object_to_json_string(jresource);
+		const char *privilege_str = json_object_to_json_string(jactions);
+		result_ptr->resource_sign = string_hash(resource_str, strlen(resource_str));
+		result_ptr->privilege_sign = string_hash(privilege_str, strlen(privilege_str));
+		elog(DEBUG3, "request access sign, resource_str:%s, privilege_str:%s", 
+			resource_str, privilege_str);
+		
+		j++;
 	} // foreach
 	char str[32];
 	sprintf(str,"%d",request_id);
@@ -310,9 +377,9 @@ static size_t write_callback(char *contents, size_t size, size_t nitems,
 }
 
 /**
- * @returns: 0 curl success; -1 curl failed
+ * @return	0 curl success; -1 curl failed
  */
-int call_ranger_rest(CURL_HANDLE curl_handle, const char* request)
+static int call_ranger_rest(CURL_HANDLE curl_handle, const char* request)
 {
 	int ret = -1;
 	CURLcode res;
@@ -339,6 +406,7 @@ int call_ranger_rest(CURL_HANDLE curl_handle, const char* request)
 	appendStringInfo(&tname, "/");
 	appendStringInfo(&tname, "%s", rps_addr_suffix);
 	curl_easy_setopt(curl_handle->curl_handle, CURLOPT_URL, tname.data);
+	pfree(tname.data);	
 
 	struct curl_slist *headers = NULL;
 	headers = curl_slist_append(headers, "Content-Type:application/json");
@@ -373,28 +441,37 @@ int call_ranger_rest(CURL_HANDLE curl_handle, const char* request)
 }
 
 /*
- * arg_list: List of RangerRequestJsonArgs
+ * check privilege(s) from ranger
+ * @param	request_list	List of RangerRequestJsonArgs
+ * @param	result_list		List of RangerPrivilegeResults
+ * @return	0 get response from ranger and parse success; -1 other error
  */
-int check_privilege_from_ranger(List *arg_list)
+int check_privilege_from_ranger(List *request_list, List *result_list)
 {
-	json_object* jrequest = create_ranger_request_json(arg_list);
+	json_object* jrequest = create_ranger_request_json(request_list, result_list);
 	Assert(jrequest != NULL);
+
 	const char *request = json_object_to_json_string(jrequest);
-	elog(DEBUG3, "send json request to ranger : %s", request);
 	Assert(request != NULL);
+	elog(DEBUG3, "send json request to ranger : %s", request);
 
 	/* call GET method to send request*/
 	Assert(curl_context_ranger.hasInited);
 	if (call_ranger_rest(&curl_context_ranger, request) < 0)
 	{
-		return RANGERCHECK_NO_PRIV;
+		return -1;
 	}
 
 	/* free the JSON object */
 	json_object_put(jrequest);
 
 	/* parse the JSON-format result */
-	RangerACLResult ret = parse_ranger_response(curl_context_ranger.response.buffer);
+	int ret = parse_ranger_response(curl_context_ranger.response.buffer, result_list);
+	if (ret < 0)
+	{
+		elog(WARNING, "parse ranger response failed, response[%s]", 
+			curl_context_ranger.response.buffer == NULL? "":curl_context_ranger.response.buffer);
+	}
 	if (curl_context_ranger.response.buffer != NULL)
 	{
 		/* reset response size to reuse the buffer. */

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bf4742cb/src/backend/parser/parse_relation.c
----------------------------------------------------------------------
diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c
index f9444ef..1dc6b86 100644
--- a/src/backend/parser/parse_relation.c
+++ b/src/backend/parser/parse_relation.c
@@ -2714,12 +2714,12 @@ warnAutoRange(ParseState *pstate, RangeVar *relation, int location)
 void
 ExecCheckRTPerms(List *rangeTable)
 {
-  if (enable_ranger && !fallBackToNativeChecks(ACL_KIND_CLASS,rangeTable,GetUserId()))
-  {
-    if(rangeTable!=NULL)
-      ExecCheckRTPermsWithRanger(rangeTable);
-    return;
-  }
+	if (enable_ranger && !fallBackToNativeChecks(ACL_KIND_CLASS,rangeTable,GetUserId()))
+	{
+		if(rangeTable!=NULL)
+			ExecCheckRTPermsWithRanger(rangeTable);
+		return;
+	}
 	ListCell   *l;
 	foreach(l, rangeTable)
 	{
@@ -2734,70 +2734,93 @@ ExecCheckRTPerms(List *rangeTable)
 void
 ExecCheckRTPermsWithRanger(List *rangeTable)
 {
-  List *ranger_check_args = NIL;
-  ListCell *l;
-  foreach(l, rangeTable)
-  {
-
-    AclMode requiredPerms;
-    Oid relOid;
-    Oid userid;
-    RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
-
-    if (rte->rtekind != RTE_RELATION)
-      continue;
-    requiredPerms = rte->requiredPerms;
-    if (requiredPerms == 0)
-      continue;
-    
-    relOid = rte->relid;
-    userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
-
-    RangerPrivilegeArgs *ranger_check_arg = (RangerPrivilegeArgs *) palloc(sizeof(RangerPrivilegeArgs));
-    ranger_check_arg->objkind = ACL_KIND_CLASS;
-    ranger_check_arg->object_oid = relOid;
-    ranger_check_arg->roleid = userid;
-    ranger_check_arg->mask = requiredPerms;
-    ranger_check_arg->how = ACLMASK_ALL;
-    ranger_check_args = lappend(ranger_check_args, ranger_check_arg);
-
-  } // foreach
-
-  if (ranger_check_args == NIL)
-    return;
-
-  // ranger ACL check with package Oids
-  List *aclresults = NIL;
-  aclresults = pg_rangercheck_batch(ranger_check_args);
-  if (aclresults == NIL)
-  {
-    elog(ERROR, "ACL check failed\n");
-    return;
-  }
-
-  // check result
-  ListCell *result;
-  foreach(result, aclresults)
-  {
-    RangerPrivilegeResults *result_ptr = (RangerPrivilegeResults *) lfirst(result);
-    if(result_ptr->result != RANGERCHECK_OK)
-    {
-      Oid relOid = result_ptr->relOid;
-      const char *rel_name = get_rel_name_partition(relOid);
-      aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_CLASS, rel_name);
-    }
-  }
-  
-  if (ranger_check_args)
-  {
-    list_free_deep(ranger_check_args);
-    ranger_check_args = NIL;
-  }
-  if (aclresults)
-  {
-    list_free_deep(aclresults);
-    aclresults = NIL;
-  }
+	List *ranger_check_args = NIL;
+	ListCell *l;
+	foreach(l, rangeTable)
+	{
+
+		AclMode requiredPerms;
+		Oid relOid;
+		Oid userid;
+		RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
+
+		if (rte->rtekind != RTE_RELATION)
+			continue;
+		requiredPerms = rte->requiredPerms;
+		if (requiredPerms == 0)
+			continue;
+
+		relOid = rte->relid;
+		userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
+
+		RangerPrivilegeArgs *ranger_check_arg = (RangerPrivilegeArgs *) palloc(sizeof(RangerPrivilegeArgs));
+		ranger_check_arg->objkind = ACL_KIND_CLASS;
+		ranger_check_arg->object_oid = relOid;
+		ranger_check_arg->roleid = userid;
+		ranger_check_arg->mask = requiredPerms;
+		ranger_check_arg->how = ACLMASK_ALL;
+		ranger_check_args = lappend(ranger_check_args, ranger_check_arg);
+
+	}
+
+	if (ranger_check_args == NIL)
+		return;
+
+	/* ranger ACL check with package Oids */
+	List *aclresults = NIL;
+	aclresults = pg_rangercheck_batch(ranger_check_args);
+	if (aclresults == NIL)
+	{
+		elog(ERROR, "ACL check failed\n");
+		return;
+	}
+
+	/* check result */
+	StringInfoData acl_fail_msg;
+	bool acl_allok = true;
+
+	ListCell *result;
+	foreach(result, aclresults)
+	{
+		RangerPrivilegeResults *result_ptr = (RangerPrivilegeResults *) lfirst(result);
+		if(result_ptr->result != RANGERCHECK_OK)
+		{
+			if (acl_allok)
+			{
+				initStringInfo(&acl_fail_msg);
+				appendStringInfo(&acl_fail_msg, "permission denied for relation(s): ");
+			}
+			else
+			{
+				appendStringInfo(&acl_fail_msg, ", ");
+			}
+			acl_allok = false;
+
+			/* collect all acl fail relations */
+			Oid relOid = result_ptr->relOid;
+			const char *rel_name = get_rel_name_partition(relOid);
+			appendStringInfo(&acl_fail_msg, "%s", rel_name);
+		}
+	}
+
+	if (ranger_check_args)
+	{
+		list_free_deep(ranger_check_args);
+		ranger_check_args = NIL;
+	}
+	if (aclresults)
+	{
+		list_free_deep(aclresults);
+		aclresults = NIL;
+	}
+
+	if (!acl_allok)
+	{
+		errstart(ERROR, __FILE__, __LINE__, PG_FUNCNAME_MACRO, TEXTDOMAIN);
+		errmsg("%s", acl_fail_msg.data),
+		pfree(acl_fail_msg.data);
+		errfinish(errOmitLocation(true));
+	}
 }
 
 /*

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bf4742cb/src/include/utils/rangerrest.h
----------------------------------------------------------------------
diff --git a/src/include/utils/rangerrest.h b/src/include/utils/rangerrest.h
index f67d8e5..136804f 100644
--- a/src/include/utils/rangerrest.h
+++ b/src/include/utils/rangerrest.h
@@ -84,6 +84,13 @@ typedef struct RangerPrivilegeResults
 {
   RangerACLResult result;
   Oid relOid;
+
+  /* 
+   * string_hash of access[i] field of ranger request 
+   * use the sign to identify each resource result
+   */ 
+  uint32 resource_sign;
+  uint32 privilege_sign;
 } RangerPrivilegeResults;
 
 typedef struct RangerRequestJsonArgs {
@@ -94,10 +101,8 @@ typedef struct RangerRequestJsonArgs {
   bool isAll;
 } RangerRequestJsonArgs;
 
-RangerACLResult parse_ranger_response(char *);
-json_object *create_ranger_request_json(List *);
-int call_ranger_rest(CURL_HANDLE curl_handle, const char *request);
-extern int check_privilege_from_ranger(List *);
 extern struct curl_context_t curl_context_ranger;
 
+int check_privilege_from_ranger(List *request_list, List *result_list);
+
 #endif


[35/50] [abbrv] incubator-hawq git commit: HAWQ-1276. hawq should error out directly when ranger plugin service is unavailable, not a warning.

Posted by es...@apache.org.
HAWQ-1276. hawq should error out directly when ranger plugin service is unavailable, not a warning.


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/326fa4f9
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/326fa4f9
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/326fa4f9

Branch: refs/heads/2.1.0.0-incubating
Commit: 326fa4f918b2be14adb3d667aea6cd707a24fa16
Parents: 21f1e29
Author: stanlyxiang <st...@gmail.com>
Authored: Wed Jan 18 15:40:12 2017 +0800
Committer: hubertzhang <hu...@apache.org>
Committed: Thu Jan 19 11:54:40 2017 +0800

----------------------------------------------------------------------
 src/backend/libpq/rangerrest.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/326fa4f9/src/backend/libpq/rangerrest.c
----------------------------------------------------------------------
diff --git a/src/backend/libpq/rangerrest.c b/src/backend/libpq/rangerrest.c
index dc5d193..c920575 100644
--- a/src/backend/libpq/rangerrest.c
+++ b/src/backend/libpq/rangerrest.c
@@ -425,7 +425,7 @@ static int call_ranger_rest(CURL_HANDLE curl_handle, const char* request)
 	/* check for errors */
 	if(res != CURLE_OK)
 	{
-		elog(WARNING, "ranger plugin service from http://%s:%d/%s is unavailable : %s.\n",
+		elog(ERROR, "ranger plugin service from http://%s:%d/%s is unavailable : %s.\n",
 				rps_addr_host, rps_addr_port, rps_addr_suffix, curl_easy_strerror(res));
 	}
 	else
@@ -467,7 +467,7 @@ int check_privilege_from_ranger(List *request_list, List *result_list)
 	int ret = parse_ranger_response(curl_context_ranger.response.buffer, result_list);
 	if (ret < 0)
 	{
-		elog(WARNING, "parse ranger response failed, ranger response content is %s",
+		elog(ERROR, "parse ranger response failed, ranger response content is %s",
 			curl_context_ranger.response.buffer == NULL? "empty.":curl_context_ranger.response.buffer);
 	}
 	if (curl_context_ranger.response.buffer != NULL)
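
Since elog(ERROR) aborts the current statement while WARNING only logs, the
practical effect is that a query now fails immediately when the Ranger plugin
service cannot be reached or returns an unparsable response, instead of
continuing and being denied with RANGERCHECK_NO_PRIV further down.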


[43/50] [abbrv] incubator-hawq git commit: HAWQ-1297. Make PXF install ready from source

Posted by es...@apache.org.
HAWQ-1297. Make PXF install ready from source


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/aac8868f
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/aac8868f
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/aac8868f

Branch: refs/heads/2.1.0.0-incubating
Commit: aac8868f8e3d8c0158a1a58b8593a713056099e4
Parents: d3983eb
Author: Shivram Mani <sh...@gmail.com>
Authored: Mon Jan 30 15:53:07 2017 -0800
Committer: Shivram Mani <sh...@gmail.com>
Committed: Mon Jan 30 15:53:07 2017 -0800

----------------------------------------------------------------------
 pxf/Makefile                                    |  23 +++-
 pxf/build.gradle                                |  29 +++-
 .../src/main/resources/pxf-private.classpath    |  67 ++++++++++
 pxf/pxf-service/src/scripts/pxf-env.sh          |   6 +
 pxf/pxf-service/src/scripts/pxf-service         | 132 ++++++++++++++-----
 5 files changed, 211 insertions(+), 46 deletions(-)
----------------------------------------------------------------------
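
For a source build the new install target (see the Makefile diff below) is
expected to be driven as something like "make install PXF_HOME=/opt/pxf" (or
with GPHOME set instead); this forwards the deploy path to "./gradlew install"
via -DdeployPath. The /opt/pxf location is only an illustration, not something
the commit mandates.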


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/aac8868f/pxf/Makefile
----------------------------------------------------------------------
diff --git a/pxf/Makefile b/pxf/Makefile
index 7669772..81298d0 100644
--- a/pxf/Makefile
+++ b/pxf/Makefile
@@ -19,26 +19,36 @@
 default: all
 
 ifneq "$(HD)" ""
-BUILD_PARAMS= -Dhd=$(HD)
+    BUILD_PARAMS= -Dhd=$(HD)
+else
+    ifneq "$(PXF_HOME)" ""
+        BUILD_PARAMS= -DdeployPath=$(PXF_HOME)
+    else ifneq "$(GPHOME)" ""
+        BUILD_PARAMS= -DdeployPath="$(GPHOME)/pxf"
+    else
+		@echo "Cannot invoke install without configuring either PXF_HOME or GPHOME"
+    endif
 endif
 
 ifneq "$(LICENSE)" ""
-BUILD_PARAMS+= -Plicense="$(LICENSE)"
+    BUILD_PARAMS+= -Plicense="$(LICENSE)"
 endif
+
 ifneq "$(VENDOR)" ""
-BUILD_PARAMS+= -Pvendor="$(VENDOR)"
+    BUILD_PARAMS+= -Pvendor="$(VENDOR)"
 endif
 
 help:
 	@echo 
-	@echo	"help it is then"
-	@echo	"Possible targets"
+	@echo"help it is then"
+	@echo   "Possible targets"
 	@echo	"  - all (clean, build, unittest, jar, tar, rpm)"
 	@echo	"  -  -  HD=<phd|hdp> - set classpath to match hadoop distribution. default phd"
 	@echo	"  -  -  LICENSE=<license info> - add license info to created RPMs"
 	@echo	"  -  -  VENDOR=<vendor name> - add vendor name to created RPMs"
 	@echo	"  - tomcat - builds tomcat rpm from downloaded tarball"
 	@echo	"  -  -  LICENSE and VENDOR parameters can be used as well"
+	@echo	"  - deploy - setup PXF along with tomcat in the configured deployPath"
 	@echo	"  - doc - creates aggregate javadoc under docs"
 
 all: 
@@ -65,3 +75,6 @@ doc:
 .PHONY: tomcat
 tomcat:
 	./gradlew tomcatRpm $(BUILD_PARAMS)
+
+install:
+	./gradlew install $(BUILD_PARAMS)

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/aac8868f/pxf/build.gradle
----------------------------------------------------------------------
diff --git a/pxf/build.gradle b/pxf/build.gradle
index 385bf08..3c6d591 100644
--- a/pxf/build.gradle
+++ b/pxf/build.gradle
@@ -473,8 +473,7 @@ task rpm(type: Copy, dependsOn: [subprojects.build, distSubprojects.buildRpm]) {
 
 // tomcat 
 def tomcatName = "apache-tomcat-${tomcatVersion}"
-def tomcatTargetDir = "tomcat/build/"
-
+def tomcatTargetDir = "tomcat/build"
 
 task tomcatGet << {
 
@@ -506,12 +505,12 @@ task tomcatGet << {
 apply plugin: 'os-package'
 
 task tomcatRpm(type: Rpm) {
-    buildDir = 'tomcat/build/'
+    buildDir = "${tomcatTargetDir}"
 
     // clean should not delete the downloaded tarball
     // and RPM, so this is a bogus directory to delete instead.
     clean {
-        delete = 'tomcat/build/something'
+        delete = "${tomcatTargetDir}/something"
     }
 
     ospackage {
@@ -544,6 +543,26 @@ task tomcatRpm(type: Rpm) {
 
 tomcatRpm.dependsOn tomcatGet
 
+def pxfTargetDir = System.properties['deployPath'] ?: "build/"
+
+task install(type: Copy, dependsOn: [subprojects.build, tomcatGet]) {
+    into "${pxfTargetDir}"
+    subprojects { subProject ->
+        from("${project.name}/build/libs") { into 'lib' }
+    }
+    from("pxf-service/src/scripts/pxf-service") {
+        into 'bin'
+        fileMode 0755
+        rename('pxf-service', 'pxf')
+    }
+
+    from("${tomcatTargetDir}/${tomcatName}") { into 'apache-tomcat' }
+    from("pxf-service/src/main/resources") { into 'conf' }
+    from("pxf-service/src/configs/pxf-site.xml") { into 'conf' }
+    from("pxf-service/src/scripts/pxf-env.sh") { into 'conf' }
+    from("pxf-service/src/configs/tomcat") { into 'tomcat-templates' }
+}
+
 
 buildDir = '.'
-apply plugin: 'nebula-aggregate-javadocs'
\ No newline at end of file
+apply plugin: 'nebula-aggregate-javadocs'

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/aac8868f/pxf/pxf-service/src/main/resources/pxf-private.classpath
----------------------------------------------------------------------
diff --git a/pxf/pxf-service/src/main/resources/pxf-private.classpath b/pxf/pxf-service/src/main/resources/pxf-private.classpath
new file mode 100644
index 0000000..48ac2f9
--- /dev/null
+++ b/pxf/pxf-service/src/main/resources/pxf-private.classpath
@@ -0,0 +1,67 @@
+##################################################################
+# This file contains the internal classpaths required to run PXF.
+# Edit to set the base paths according to your specific package layout
+# Adding new resources should be done using pxf-public.classpath file.
+##################################################################
+
+# PXF Configuration
+pxf/conf
+
+# Hadoop Configuration
+hadoop/etc/hadoop
+
+# Hive Configuration
+hive/conf
+
+# Hbase Configuration
+hbase/conf
+
+# PXF Libraries
+pxf/pxf-hbase-*[0-9].jar
+pxf/pxf-hdfs-*[0-9].jar
+pxf/pxf-hive-*[0-9].jar
+pxf/pxf-json-*[0-9].jar
+
+# Hadoop Libraries
+hadoop/share/hadoop/hdfs/hadoop-hdfs-*[0-9].jar
+hadoop/share/hadoop/mapreduce/hadoop-mapreduce-client-core-*[0-9].jar
+hadoop/share/hadoop/common/lib/hadoop-auth-*[0-9].jar
+hadoop/share/hadoop/common/hadoop-common-*[0-9].jar
+hadoop/share/hadoop/common/lib/asm-*[0-9].jar
+hadoop/share/hadoop/common/lib/avro-*[0-9].jar
+hadoop/share/hadoop/common/lib/commons-cli-*[0-9].jar
+hadoop/share/hadoop/common/lib/commons-codec-*[0-9].jar
+hadoop/share/hadoop/common/lib/commons-collections-*[0-9].jar
+hadoop/share/hadoop/common/lib/commons-configuration-*[0-9].jar
+hadoop/share/hadoop/common/lib/commons-io-*[0-9].jar
+hadoop/share/hadoop/common/lib/commons-lang-*[0-9].jar
+hadoop/share/hadoop/common/lib/commons-logging-*[0-9].jar
+hadoop/share/hadoop/common/lib/guava-*[0-9].jar
+hadoop/share/hadoop/common/lib/htrace-core-*[0-9]*.jar
+hadoop/share/hadoop/common/lib/jetty-*.jar
+hadoop/share/hadoop/common/lib/jackson-core-asl-*[0-9].jar
+hadoop/share/hadoop/common/lib/jackson-mapper-asl-*[0-9].jar
+hadoop/share/hadoop/common/lib/jersey-core-*[0-9].jar
+hadoop/share/hadoop/common/lib/jersey-server-*[0-9].jar
+hadoop/share/hadoop/common/lib/log4j-*[0-9].jar
+hadoop/share/hadoop/common/lib/protobuf-java-*[0-9].jar
+hadoop/share/hadoop/common/lib/slf4j-api-*[0-9].jar
+
+# Hive Libraries
+hive/lib/antlr-runtime-*[0-9].jar
+hive/lib/datanucleus-api-jdo-*[0-9].jar
+hive/lib/datanucleus-core-*[0-9].jar
+hive/lib/hive-exec-*[0-9].jar
+hive/lib/hive-metastore-*[0-9].jar
+hive/lib/jdo-api-*[0-9].jar
+hive/lib/libfb303-*[0-9].jar
+# when running on OSx, 1.0.5 or higher version is required
+hive/lib/snappy-java-*[0-9].jar
+
+# HBase Libraries
+hbase/lib/hbase-client-*[0-9].jar
+hbase/lib/hbase-common-*[0-9].jar
+hbase/lib/hbase-protocol-*[0-9].jar
+hbase/lib/htrace-core-*[0-9]*.jar
+hbase/lib/netty-*[0-9].Final.jar
+hbase/lib/zookeeper-*[0-9].jar
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/aac8868f/pxf/pxf-service/src/scripts/pxf-env.sh
----------------------------------------------------------------------
diff --git a/pxf/pxf-service/src/scripts/pxf-env.sh b/pxf/pxf-service/src/scripts/pxf-env.sh
index 2ac80a9..ba9b368 100644
--- a/pxf/pxf-service/src/scripts/pxf-env.sh
+++ b/pxf/pxf-service/src/scripts/pxf-env.sh
@@ -28,3 +28,9 @@ export CATALINA_OUT=${PXF_LOGDIR}/catalina.out
 
 # Path to Run directory
 export PXF_RUNDIR=/var/run/pxf
+
+# Configured user
+export PXF_USER=pxf
+
+# Port
+export PXF_PORT=51200
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/aac8868f/pxf/pxf-service/src/scripts/pxf-service
----------------------------------------------------------------------
diff --git a/pxf/pxf-service/src/scripts/pxf-service b/pxf/pxf-service/src/scripts/pxf-service
index 7f62504..2d63310 100644
--- a/pxf/pxf-service/src/scripts/pxf-service
+++ b/pxf/pxf-service/src/scripts/pxf-service
@@ -20,18 +20,12 @@
 # pxf-service	start/stop/initialize/status the PXF instance
 #
 
-pxf_root=/usr/lib/pxf
-env_script=/etc/pxf/conf/pxf-env.sh
-tomcat_root=/opt/apache-tomcat
-tomcat_templates=/opt/pxf/tomcat-templates
-instance_root=/var/pxf
 
-pxf_user=pxf
-instance_name=pxf-service
-instance_port=51200
-instance_owner=pxf:pxf
-
-curl=`which curl`
+if [ -z $PXF_HOME ]; then
+    env_script=/etc/pxf/conf/pxf-env.sh
+else
+    env_script=$PXF_HOME/conf/pxf-env.sh
+fi
 
 # load pxf-env.sh script
 if [ ! -f $env_script ]; then
@@ -40,6 +34,29 @@ else
 	source $env_script
 fi
 
+pxf_user=$PXF_USER
+instance_port=$PXF_PORT
+instance_name=pxf-service
+
+if [ -z $PXF_HOME ]; then
+    # RPM based setup
+    pxf_root=/usr/lib/pxf
+    tomcat_root=/opt/apache-tomcat
+    tomcat_templates=/opt/pxf/tomcat-templates
+    instance_root=/var/pxf
+    instance_owner=$pxf_user:$pxf_user
+else
+    # OSS/Source code based setup
+    pxf_root=$PXF_HOME/lib
+    tomcat_root=$PXF_HOME/apache-tomcat
+    tomcat_templates=$PXF_HOME/tomcat-templates
+    instance_root=$PXF_HOME
+    instance_owner=$pxf_user
+fi
+
+curl=`which curl`
+
+
 # validate JAVA_HOME
 if [ ! -x $JAVA_HOME/bin/java ]; then
 	echo ERROR: \$JAVA_HOME is invalid
@@ -61,9 +78,8 @@ function createInstance()
 		return 1
 	fi
 
-	chown $instance_owner -R $instance_root
+	chown -R $instance_owner $instance_root
 	chmod 700 $instance_root/$instance_name
-
 	return 0
 }
 
@@ -97,7 +113,7 @@ function configureInstance()
 	# set pid
 	catalinaEnv=$instance_root/$instance_name/bin/setenv.sh
 	cat $catalinaEnv | \
-	sed "s|^[[:blank:]]*CATALINA_PID=.*$|CATALINA_PID=$PXF_RUNDIR/catalina.pid|g" \
+	sed -e "s|^[[:blank:]]*CATALINA_PID=.*$|CATALINA_PID=$PXF_RUNDIR/catalina.pid|g" \
 	> ${catalinaEnv}.tmp
 	rm $catalinaEnv
 	mv ${catalinaEnv}.tmp $catalinaEnv
@@ -176,7 +192,7 @@ function checkWebapp()
 # non zero otherwise
 function instanceExists()
 {
-	if [ ! -d $instance_root ]; then
+	if [ ! -d "$instance_root/$instance_name" ]; then
 		return 1
 	fi
 
@@ -193,39 +209,83 @@ function doInit()
 		return 0
 	fi
 
-	createInstance || return 1
+    createInstance || return 1
 	configureInstance || return 1
 	deployWebapp || return 1
 }
 
+#
+# patchWebapp patches the webapp config files
+# patch applied only if PXF_HOME is defined
+#
+function patchWebapp()
+{
+    if [ -z $PXF_HOME ]; then
+        # webapp doesn't require patch
+        return 0
+    fi
+    pushd $instance_root/$instance_name/webapps || return 1
+    rm -rf pxf
+    mkdir pxf
+    cd pxf
+    unzip -q ../pxf.war
+    popd
+
+    context_file=$instance_root/$instance_name/webapps/pxf/META-INF/context.xml
+    cat $context_file | \
+    sed  -e "s:classpathFiles=\"[a-zA-Z0-9\/\;.-]*\":classpathFiles=\"$PXF_HOME\/conf\/pxf-private.classpath\":" \
+    -e "s:secondaryClasspathFiles=\"[a-zA-Z0-9\/\;.-]*\":secondaryClasspathFiles=\"$PXF_HOME\/conf\/pxf-public.classpath\":" > context.xml.tmp
+    mv context.xml.tmp $context_file
+
+    web_file=$instance_root/$instance_name/webapps/pxf/WEB-INF/web.xml
+    cat $web_file | \
+    sed "s:<param-value>.*pxf-log4j.properties<\/param-value>:<param-value>$PXF_HOME\/conf\/pxf-log4j.properties<\/param-value>:" > web.xml.tmp
+    mv web.xml.tmp $web_file
+}
+
+function commandWebapp()
+{
+    command=$1
+    pushd $instance_root
+    su $pxf_user -c "$instance_root/$instance_name/bin/catalina.sh $command"
+    if [ $? -ne 0 ]; then
+        return 1
+    fi
+    popd
+}
+
 # 
-# doStartStop handles start/stop commands
-# commands are executed as the user $pxf_user
+# doStart handles start command
+# command is executed as the user $pxf_user
 #
 # after start, uses checkWebapp to verify the PXF webapp was loaded
 # successfully
 #
-function doStartStop()
+function doStart()
 {
-	command=$1
-
 	instanceExists
 	if [ $? -ne 0 ]; then
 		echo ERROR: cant find PXF instance, maybe call init?
 		return 1
 	fi
+	patchWebapp || return 1
+	commandWebapp start || return 1
+	checkWebapp 300 || return 1
+}
 
-	pushd $instance_root
-	su $pxf_user -c "$instance_root/$instance_name/bin/catalina.sh $command"
-	if [ $? -ne 0 ]; then
-		return 1
-	fi 
-	popd
-	
-	if [ "$command" = "start" ]; then
-		# try to connect for 5 minutes
-		checkWebapp 300 || return 1
-	fi
+#
+# doStop handles stop command
+# command is executed as the user $pxf_user
+#
+#
+function doStop()
+{
+    instanceExists
+    if [ $? -ne 0 ]; then
+        echo "ERROR: can't find PXF instance, maybe call init?"
+        return 1
+    fi
+    commandWebapp stop || return 1
 }
 
 function doStatus()
@@ -240,15 +300,15 @@ case "$command" in
 		doInit
 		;;
 	"start" )
-		doStartStop $command
+		doStart
 		;;
 	"stop" )
-		doStartStop $command
+		doStop
 		;;
 	"restart" )
-		doStartStop stop
+		doStop
 		sleep 1s
-		doStartStop start
+		doStart
 		;;
 	"status" )
 		doStatus
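
With the install target above the script lands in bin/ as "pxf" (note the
rename in build.gradle), so a source deployment would typically run something
like "$PXF_HOME/bin/pxf init" followed by "$PXF_HOME/bin/pxf start", picking up
PXF_USER and PXF_PORT from conf/pxf-env.sh; the exact invocation is an
illustration rather than part of the commit.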


[07/50] [abbrv] incubator-hawq git commit: HAWQ-1246. Add generation of RequestID, ClientIP, queryContext(SQL Statement) and encapsulate these contents to JSON request to RPS.

Posted by es...@apache.org.
HAWQ-1246. Add generation of RequestID, ClientIP, queryContext(SQL Statement) and encapsulate these contents to JSON request to RPS.


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/60f09337
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/60f09337
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/60f09337

Branch: refs/heads/2.1.0.0-incubating
Commit: 60f093372888fdead70ffea1d2b035c7d2bc343d
Parents: 94239f5
Author: stanlyxiang <st...@gmail.com>
Authored: Thu Dec 22 10:28:17 2016 +0800
Committer: hzhang2 <zh...@163.com>
Committed: Tue Jan 3 10:39:51 2017 +0800

----------------------------------------------------------------------
 src/backend/catalog/aclchk.c   |  57 ++++---
 src/backend/libpq/rangerrest.c | 318 +++++++++++-------------------------
 src/include/utils/rangerrest.h |  11 +-
 3 files changed, 134 insertions(+), 252 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/60f09337/src/backend/catalog/aclchk.c
----------------------------------------------------------------------
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index d3e4b64..d19a045 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -2732,7 +2732,7 @@ List *pg_rangercheck_batch(List *arg_list)
 
   } // foreach
 
-  RangerACLResult ret = check_privilege_from_ranger_batch(requestargs);
+  RangerACLResult ret = check_privilege_from_ranger(requestargs);
 
   ListCell *result;
   int k = 0;
@@ -2752,7 +2752,7 @@ List *pg_rangercheck_batch(List *arg_list)
           (RangerRequestJsonArgs*)lfirst(tmp);
       pfree(requestarg->user);
       pfree(requestarg->object);
-      pfree(requestarg->actions);
+      list_free_deep(requestarg->actions);
     }
 
     list_free_deep(requestargs);
@@ -2770,27 +2770,38 @@ AclResult
 pg_rangercheck(AclObjectKind objkind, Oid object_oid, Oid roleid,
          AclMode mask, AclMaskHow how)
 {
-  char* objectname = getNameFromOid(objkind, object_oid);
-  char* rolename = getRoleName(roleid);
-  List* actions = getActionName(mask);
-  bool isAll = (how == ACLMASK_ALL) ? true: false;
-
-  elog(LOG, "rangeraclcheck kind:%d,objectname:%s,role:%s,mask:%u\n",objkind,objectname,rolename,mask);
-  int ret = check_privilege_from_ranger(rolename, objkind, objectname, actions, isAll);
-
-  if(objectname){
-    pfree(objectname);
-    objectname = NULL;
-  }
-  if(rolename){
-    pfree(rolename);
-    rolename = NULL;
-  }
-  if(actions){
-    list_free_deep(actions);
-    actions = NIL;
-  }
-  return ret;
+	char* objectname = getNameFromOid(objkind, object_oid);
+	char* rolename = getRoleName(roleid);
+	List* actions = getActionName(mask);
+	bool isAll = (how == ACLMASK_ALL) ? true: false;
+
+	elog(LOG, "rangeraclcheck kind:%d,objectname:%s,role:%s,mask:%u\n",objkind,objectname,rolename,mask);
+	List *requestargs = NIL;
+	RangerRequestJsonArgs *requestarg = (RangerRequestJsonArgs *) palloc(sizeof(RangerRequestJsonArgs));
+	requestarg->user = rolename;
+	requestarg->kind = objkind;
+	requestarg->object = objectname;
+	requestarg->actions = actions;
+	requestarg->isAll = isAll;
+	requestargs = lappend(requestargs, requestarg);
+	int ret = check_privilege_from_ranger(requestargs);
+
+	if (requestargs)
+	{
+		ListCell *cell = list_head(requestargs);
+		while (cell != NULL)
+		{
+			ListCell *tmp = cell;
+			cell = lnext(cell);
+			RangerRequestJsonArgs* requestarg = (RangerRequestJsonArgs*) lfirst(tmp);
+			pfree(requestarg->user);
+			pfree(requestarg->object);
+			list_free_deep(requestarg->actions);
+		}
+		list_free_deep(requestargs);
+		requestargs = NULL;
+	}
+	return ret;
 }
 
 /*

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/60f09337/src/backend/libpq/rangerrest.c
----------------------------------------------------------------------
diff --git a/src/backend/libpq/rangerrest.c b/src/backend/libpq/rangerrest.c
index 120f64f..56d30b5 100644
--- a/src/backend/libpq/rangerrest.c
+++ b/src/backend/libpq/rangerrest.c
@@ -49,10 +49,28 @@ char* AclObjectKindStr[] =
 	"none"               /* MUST BE LAST */
 };
 
+static int request_id = 1;
+
+static void getClientIP(char *remote_host)
+{
+	if( MyProcPort->remote_host == NULL || strlen(MyProcPort->remote_host) == 0 )
+	{
+		snprintf(remote_host, HOST_BUFFER_SIZE, "%s", "UNKNOWN");
+		return;
+	}
+	if (strcmp(MyProcPort->remote_host, "[local]") == 0)
+	{
+		snprintf(remote_host, HOST_BUFFER_SIZE, "%s", "127.0.0.1");
+	}
+	else
+	{
+		snprintf(remote_host, HOST_BUFFER_SIZE, "%s", MyProcPort->remote_host);
+	}
+}
+
 RangerACLResult parse_ranger_response(char* buffer)
 {
-	Assert(buffer != NULL);
-	if (strlen(buffer) == 0)
+	if (buffer == NULL || strlen(buffer) == 0)
 		return RANGERCHECK_UNKNOWN;
 
 	elog(LOG, "read from Ranger Restful API: %s", buffer);
@@ -92,15 +110,43 @@ RangerACLResult parse_ranger_response(char* buffer)
 	return RANGERCHECK_OK;
 
 }
-
-/*
- * args: List of RangerRequestJsonArgs
+/**
+ * Create a JSON object for Ranger request given some parameters.
+ *
+ *   {
+ *     "requestId": 1,
+ *     "user": "joe",
+ *     "groups": ["admin","us"],
+ *     "clientIp": "123.0.0.21",
+ *     "context": "SELECT * FROM sales",
+ *     "access":
+ *       [
+ *         {
+ *           "resource":
+ *           {
+ *             "database": "finance"
+ *           },
+ *           "privileges": ["connect"]
+ *         },
+ *         {
+ *           "resource":
+ *           {
+ *             "database": "finance",
+ *             "schema": "us",
+ *             "table": "sales"
+ *           },
+ *           "privileges": ["select", "insert"]
+ *         }
+ *       ]
+ *   }
+ *
+ *   args: List of RangerRequestJsonArgs
  */
-json_object *create_ranger_request_json_batch(List *args)
+json_object *create_ranger_request_json(List *args)
 {
+	json_object *jrequest = json_object_new_object();
 	json_object *juser = NULL;
 	json_object *jaccess = json_object_new_array();
-	json_object *jrequest = json_object_new_object();
 	char *user = NULL;
 	ListCell *arg;
 
@@ -116,28 +162,27 @@ json_object *create_ranger_request_json_batch(List *args)
 		char* object = arg_ptr->object;
 		Assert(user != NULL && object != NULL && privilege != NULL && arg_ptr->isAll);
 		elog(LOG, "build json for ranger request, user:%s, kind:%s, object:%s",
-			user, AclObjectKindStr[kind], object);
+				user, AclObjectKindStr[kind], object);
 
-		json_object *jresource = json_object_new_object();
 		json_object *jelement = json_object_new_object();
+		json_object *jresource = json_object_new_object();
 		json_object *jactions = json_object_new_array();
-
 		switch(kind)
 		{
-		case ACL_KIND_CLASS:
-		case ACL_KIND_SEQUENCE:
-		case ACL_KIND_PROC:
-		case ACL_KIND_NAMESPACE:
-		case ACL_KIND_LANGUAGE:
+			case ACL_KIND_CLASS:
+			case ACL_KIND_SEQUENCE:
+			case ACL_KIND_PROC:
+			case ACL_KIND_NAMESPACE:
+			case ACL_KIND_LANGUAGE:
 			{
-				char *ptr = NULL; char *name = NULL;
+				char *ptr = NULL;
+				char *name = NULL;
 				char *first = NULL; // could be a database or protocol or tablespace
 				char *second = NULL; // could be a schema or language
 				char *third = NULL; // could be a table or sequence or function
 				int idx = 0;
-				for (name = strtok_r(object, ".", &ptr);
-					name;
-					name = strtok_r(NULL, ".", &ptr), idx++)
+				for (name = strtok_r(object, ".", &ptr); name;
+						name = strtok_r(NULL, ".", &ptr), idx++)
 				{
 					if (idx == 0)
 					{
@@ -180,24 +225,23 @@ json_object *create_ranger_request_json_batch(List *args)
 					pfree(third);
 				break;
 			}
-		case ACL_KIND_OPER:
-		case ACL_KIND_CONVERSION:
-		case ACL_KIND_DATABASE:
-		case ACL_KIND_TABLESPACE:
-		case ACL_KIND_TYPE:
-		case ACL_KIND_FILESYSTEM:
-		case ACL_KIND_FDW:
-		case ACL_KIND_FOREIGN_SERVER:
-		case ACL_KIND_EXTPROTOCOL:
+			case ACL_KIND_OPER:
+			case ACL_KIND_CONVERSION:
+			case ACL_KIND_DATABASE:
+			case ACL_KIND_TABLESPACE:
+			case ACL_KIND_TYPE:
+			case ACL_KIND_FILESYSTEM:
+			case ACL_KIND_FDW:
+			case ACL_KIND_FOREIGN_SERVER:
+			case ACL_KIND_EXTPROTOCOL:
 			{
 				json_object *jobject = json_object_new_string(object);
 				json_object_object_add(jresource, AclObjectKindStr[kind], jobject);
 				break;
 			}
-		default:
-			elog(ERROR, "unrecognized objkind: %d", (int) kind);
+			default:
+				elog(ERROR, "unrecognized objkind: %d", (int) kind);
 		} // switch
-
 		json_object_object_add(jelement, "resource", jresource);
 
 		ListCell *cell;
@@ -210,161 +254,22 @@ json_object *create_ranger_request_json_batch(List *args)
 		json_object_array_add(jaccess, jelement);
 
 	} // foreach
-
+	char str[32];
+	sprintf(str,"%d",request_id);
+	json_object *jreqid = json_object_new_string(str);
+	json_object_object_add(jrequest, "requestId", jreqid);
 	json_object_object_add(jrequest, "user", juser);
-	json_object_object_add(jrequest, "access", jaccess);
 
-	json_object *jreqid = json_object_new_string("1");
-	json_object_object_add(jrequest, "requestId", jreqid);
-	json_object *jclientip = json_object_new_string("123.0.0.21");
+	char remote_host[HOST_BUFFER_SIZE];
+	getClientIP(remote_host);
+	json_object *jclientip = json_object_new_string(remote_host);
 	json_object_object_add(jrequest, "clientIp", jclientip);
-	json_object *jcontext = json_object_new_string("SELECT * FROM DDDDDDD");
-	json_object_object_add(jrequest, "context", jcontext);
-
-	return jrequest;
-}
-
-/**
- * Create a JSON object for Ranger request given some parameters.
- *
- *   {
- *     "requestId": 1,
- *     "user": "joe",
- *     "groups": ["admin","us"],
- *     "clientIp": "123.0.0.21",
- *     "context": "SELECT * FROM sales",
- *     "access":
- *       [
- *         {
- *           "resource":
- *           {
- *             "database": "finance"
- *           },
- *           "privileges": ["connect"]
- *         },
- *         {
- *           "resource":
- *           {
- *             "database": "finance",
- *             "schema": "us",
- *             "table": "sales"
- *           },
- *           "privileges": ["select, insert"]
- *         }
- *       ]
- *   }
- */
-json_object* create_ranger_request_json(char* user, AclObjectKind kind, char* object,
-	List* actions, bool isAll)
-{
-	Assert(user != NULL && object != NULL && privilege != NULL
-		&& isAll);
-	ListCell *cell;
-
-	elog(LOG, "build json for ranger request, user:%s, kind:%s, object:%s",
-		user, AclObjectKindStr[kind], object);
-	json_object *jrequest = json_object_new_object();
-	json_object *juser = json_object_new_string(user);
-
-	json_object *jaccess = json_object_new_array();
-	json_object *jelement = json_object_new_object();
-
-	json_object *jresource = json_object_new_object();
-	switch(kind)
-	{
-	case ACL_KIND_CLASS:
-	case ACL_KIND_SEQUENCE:
-	case ACL_KIND_PROC:
-	case ACL_KIND_NAMESPACE:
-	case ACL_KIND_LANGUAGE:
-		{
-			char *ptr = NULL; char *name = NULL;
-			char *first = NULL; // could be a database or protocol or tablespace
-			char *second = NULL; // could be a schema or language
-			char *third = NULL; // could be a table or sequence or function
-			int idx = 0;
-			for (name = strtok_r(object, ".", &ptr);
-				name;
-				name = strtok_r(NULL, ".", &ptr), idx++)
-			{
-				if (idx == 0)
-				{
-					first = pstrdup(name);
-				}
-				else if (idx == 1)
-				{
-					second = pstrdup(name);
-				}
-				else
-				{
-					third = pstrdup(name);
-				}
-			}
-
-			if (first != NULL)
-			{
-				json_object *jfirst = json_object_new_string(first);
-				json_object_object_add(jresource, "database", jfirst);
-			}
-			if (second != NULL)
-			{
-				json_object *jsecond = json_object_new_string(second);
-				json_object_object_add(jresource,
-					(kind == ACL_KIND_LANGUAGE) ? "language" : "schema", jsecond);
-			}
-			if (third != NULL)
-			{
-				json_object *jthird = json_object_new_string(third);
-				json_object_object_add(jresource,
-					(kind == ACL_KIND_CLASS) ? "table" :
-					(kind == ACL_KIND_SEQUENCE) ? "sequence" : "function", jthird);
-			}
 
-			if (first != NULL)
-				pfree(first);
-			if (second != NULL)
-				pfree(second);
-			if (third != NULL)
-				pfree(third);
-			break;
-		}
-	case ACL_KIND_OPER:
-	case ACL_KIND_CONVERSION:
-	case ACL_KIND_DATABASE:
-	case ACL_KIND_TABLESPACE:
-	case ACL_KIND_TYPE:
-	case ACL_KIND_FILESYSTEM:
-	case ACL_KIND_FDW:
-	case ACL_KIND_FOREIGN_SERVER:
-	case ACL_KIND_EXTPROTOCOL:
-		{
-			json_object *jobject = json_object_new_string(object);
-			json_object_object_add(jresource, AclObjectKindStr[kind], jobject);
-			break;
-		}
-	default:
-		elog(ERROR, "unrecognized objkind: %d", (int) kind);
-	}
-
-	json_object *jactions = json_object_new_array();
-	foreach(cell, actions)
-	{
-		json_object* jaction = json_object_new_string((char *)cell->data.ptr_value);
-		json_object_array_add(jactions, jaction);
-	}
-	json_object_object_add(jelement, "resource", jresource);
-	json_object_object_add(jelement, "privileges", jactions);
-	json_object_array_add(jaccess, jelement);
-
-	json_object_object_add(jrequest, "user", juser);
-	json_object_object_add(jrequest, "access", jaccess);
-	json_object *jreqid = json_object_new_string("1");
-	json_object_object_add(jrequest, "requestId", jreqid);
-	json_object *jclientip = json_object_new_string("123.0.0.21");
-	json_object_object_add(jrequest, "clientIp", jclientip);
-	json_object *jcontext = json_object_new_string("SELECT * FROM DDDDDDD");
+	json_object *jcontext = json_object_new_string(
+			(debug_query_string == NULL || strlen(debug_query_string) == 0)
+				? "connect to db" : debug_query_string);
 	json_object_object_add(jrequest, "context", jcontext);
-
+	json_object_object_add(jrequest, "access", jaccess);
 
 	return jrequest;
 }
@@ -435,11 +340,9 @@ int call_ranger_rest(CURL_HANDLE curl_handle, const char* request)
 	curl_easy_setopt(curl_handle->curl_handle, CURLOPT_URL, tname.data);
 
 	struct curl_slist *headers = NULL;
-	//curl_slist_append(headers, "Accept: application/json");
 	headers = curl_slist_append(headers, "Content-Type:application/json");
 	curl_easy_setopt(curl_handle->curl_handle, CURLOPT_HTTPHEADER, headers);
 
-	//curl_easy_setopt(curl_handle->curl_handle, CURLOPT_POST, 1L);
 	curl_easy_setopt(curl_handle->curl_handle, CURLOPT_POSTFIELDS,request);
 	//"{\"requestId\": 1,\"user\": \"hubert\",\"clientIp\":\"123.0.0.21\",\"context\": \"SELECT * FROM sales\",\"access\":[{\"resource\":{\"database\":\"a-database\",\"schema\":\"a-schema\",\"table\":\"sales\"},\"privileges\": [\"select\"]}]}");
 	/* send all data to this function  */
@@ -447,7 +350,11 @@ int call_ranger_rest(CURL_HANDLE curl_handle, const char* request)
 	curl_easy_setopt(curl_handle->curl_handle, CURLOPT_WRITEDATA, (void *)curl_handle);
 
 	res = curl_easy_perform(curl_handle->curl_handle);
-
+	if(request_id == INT_MAX)
+	{
+		request_id = 0;
+	}
+	request_id++;
 	/* check for errors */
 	if(res != CURLE_OK)
 	{
@@ -476,9 +383,9 @@ _exit:
 /*
  * arg_list: List of RangerRequestJsonArgs
  */
-int check_privilege_from_ranger_batch(List *arg_list)
+int check_privilege_from_ranger(List *arg_list)
 {
-	json_object* jrequest = create_ranger_request_json_batch(arg_list);
+	json_object* jrequest = create_ranger_request_json(arg_list);
 	Assert(jrequest != NULL);
 	const char *request = json_object_to_json_string(jrequest);
 	elog(LOG, "Send JSON request to Ranger: %s", request);
@@ -505,42 +412,3 @@ int check_privilege_from_ranger_batch(List *arg_list)
 
 	return ret;
 }
-
-/*
- * Check the privilege from Ranger for one role
- */
-int check_privilege_from_ranger(char* user, AclObjectKind kind, char* object,
-	List* actions, bool isAll)
-{
-	json_object* jrequest = create_ranger_request_json(user, kind, object,
-		actions, isAll);
-
-	Assert(jrequest != NULL);
-	const char* request = json_object_to_json_string(jrequest);
-	elog(LOG, "send JSON request to Ranger: %s", request);
-	Assert(request != NULL);
-
-	struct curl_context_t curl_context;
-	memset(&curl_context, 0, sizeof(struct curl_context_t));
-
-	/* call GET method to send request*/
-	if (call_ranger_rest(&curl_context, request) < 0)
-	{
-		return RANGERCHECK_NO_PRIV;
-	}
-
-	/* free the JSON object */
-	json_object_put(jrequest);
-
-	/* parse the JSON-format result */
-	RangerACLResult ret = parse_ranger_response(curl_context.response.buffer);
-
-	/* free response buffer */
-	if (curl_context.response.buffer != NULL)
-	{
-		pfree(curl_context.response.buffer);
-	}
-
-	return ret;
-}
-
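
To see the shape of the request the refactored builder now emits, here is a minimal standalone json-c sketch. The values are illustrative only (the backend fills them from MyProcPort, debug_query_string and the per-backend request_id counter), and the include path may be <json-c/json.h> on newer json-c installs:

#include <stdio.h>
#include <json/json.h>   /* json-c, the library used by rangerrest.c */

int main(void)
{
	/* Illustrative values only; in the backend these come from the
	 * current session, the running query and the request counter. */
	json_object *jrequest = json_object_new_object();
	json_object_object_add(jrequest, "requestId", json_object_new_string("42"));
	json_object_object_add(jrequest, "user",      json_object_new_string("joe"));
	json_object_object_add(jrequest, "clientIp",  json_object_new_string("10.0.0.5"));
	json_object_object_add(jrequest, "context",   json_object_new_string("SELECT * FROM sales"));

	/* One access element: a resource plus the privileges requested on it. */
	json_object *jresource = json_object_new_object();
	json_object_object_add(jresource, "database", json_object_new_string("finance"));
	json_object_object_add(jresource, "schema",   json_object_new_string("us"));
	json_object_object_add(jresource, "table",    json_object_new_string("sales"));

	json_object *jactions = json_object_new_array();
	json_object_array_add(jactions, json_object_new_string("select"));

	json_object *jelement = json_object_new_object();
	json_object_object_add(jelement, "resource",   jresource);
	json_object_object_add(jelement, "privileges", jactions);

	json_object *jaccess = json_object_new_array();
	json_object_array_add(jaccess, jelement);
	json_object_object_add(jrequest, "access", jaccess);

	printf("%s\n", json_object_to_json_string(jrequest));
	json_object_put(jrequest);   /* releases the whole object tree */
	return 0;
}

Printing the tree gives a payload of the same form as the header comment above, with "requestId", "user", "clientIp" and "context" now carrying real session data instead of the old hard-coded placeholders.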

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/60f09337/src/include/utils/rangerrest.h
----------------------------------------------------------------------
diff --git a/src/include/utils/rangerrest.h b/src/include/utils/rangerrest.h
index 541bdbc..692c832 100644
--- a/src/include/utils/rangerrest.h
+++ b/src/include/utils/rangerrest.h
@@ -32,6 +32,11 @@
 #include "postgres.h"
 #include "utils/acl.h"
 #include "utils/guc.h"
+#include "miscadmin.h"
+#include "libpq/libpq-be.h"
+#include "tcop/tcopprot.h"
+
+#define HOST_BUFFER_SIZE 1025
 
 typedef enum
 {
@@ -86,10 +91,8 @@ typedef struct RangerRequestJsonArgs {
 } RangerRequestJsonArgs;
 
 RangerACLResult parse_ranger_response(char *);
-json_object *create_ranger_request_json_batch(List *);
-json_object *create_ranger_request_json(char *, AclObjectKind kind, char *, List *, bool);
+json_object *create_ranger_request_json(List *);
 int call_ranger_rest(CURL_HANDLE curl_handle, const char *request);
-extern int check_privilege_from_ranger_batch(List *);
-extern int check_privilege_from_ranger(char *, AclObjectKind kind, char *, List *, bool);
+extern int check_privilege_from_ranger(List *);
 
 #endif
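
With the single-object variants removed, the batch call is the only entry point left. A minimal call-site sketch, assuming the RangerRequestJsonArgs fields visible in the diffs above (user, kind, object, actions, isAll) and ordinary backend memory management; the helper name and values are illustrative, not taken from the HAWQ sources:

/* Illustrative only: how a backend call site might drive the batch API. */
static int
check_sales_table_access(void)
{
	RangerRequestJsonArgs *arg =
		(RangerRequestJsonArgs *) palloc0(sizeof(RangerRequestJsonArgs));

	arg->user    = pstrdup("joe");
	arg->kind    = ACL_KIND_CLASS;                 /* table-like object */
	arg->object  = pstrdup("finance.us.sales");    /* database.schema.table */
	arg->actions = list_make2(pstrdup("select"), pstrdup("insert"));
	arg->isAll   = true;

	/* One HTTP round trip covers every element of the list. */
	return check_privilege_from_ranger(list_make1(arg));
}

The caller stays responsible for releasing user, object and the actions list once the check returns, which is what the cleanup loop at the top of this message does.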


[20/50] [abbrv] incubator-hawq git commit: Revert "HAWQ-1248. Merge Dockerfiles for HAWQ Dev into HAWQ code base."

Posted by es...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-site.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-site.xml b/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-site.xml
deleted file mode 100644
index a810ca4..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-test/conf/kms-site.xml
+++ /dev/null
@@ -1,173 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<configuration>
-
-  <!-- KMS Backend KeyProvider -->
-
-  <property>
-    <name>hadoop.kms.key.provider.uri</name>
-    <value>jceks://file@/${user.home}/kms.keystore</value>
-    <description>
-      URI of the backing KeyProvider for the KMS.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.security.keystore.JavaKeyStoreProvider.password</name>
-    <value>none</value>
-    <description>
-      If using the JavaKeyStoreProvider, the password for the keystore file.
-    </description>
-  </property>
-
-  <!-- KMS Cache -->
-
-  <property>
-    <name>hadoop.kms.cache.enable</name>
-    <value>true</value>
-    <description>
-      Whether the KMS will act as a cache for the backing KeyProvider.
-      When the cache is enabled, operations like getKeyVersion, getMetadata,
-      and getCurrentKey will sometimes return cached data without consulting
-      the backing KeyProvider. Cached values are flushed when keys are deleted
-      or modified.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.cache.timeout.ms</name>
-    <value>600000</value>
-    <description>
-      Expiry time for the KMS key version and key metadata cache, in
-      milliseconds. This affects getKeyVersion and getMetadata.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.current.key.cache.timeout.ms</name>
-    <value>30000</value>
-    <description>
-      Expiry time for the KMS current key cache, in milliseconds. This
-      affects getCurrentKey operations.
-    </description>
-  </property>
-
-  <!-- KMS Audit -->
-
-  <property>
-    <name>hadoop.kms.audit.aggregation.window.ms</name>
-    <value>10000</value>
-    <description>
-      Duplicate audit log events within the aggregation window (specified in
-      ms) are quashed to reduce log traffic. A single message for aggregated
-      events is printed at the end of the window, along with a count of the
-      number of aggregated events.
-    </description>
-  </property>
-
-  <!-- KMS Security -->
-
-  <property>
-    <name>hadoop.kms.authentication.type</name>
-    <value>simple</value>
-    <description>
-      Authentication type for the KMS. Can be either &quot;simple&quot;
-      or &quot;kerberos&quot;.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.authentication.kerberos.keytab</name>
-    <value>${user.home}/kms.keytab</value>
-    <description>
-      Path to the keytab with credentials for the configured Kerberos principal.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.authentication.kerberos.principal</name>
-    <value>HTTP/localhost</value>
-    <description>
-      The Kerberos principal to use for the HTTP endpoint.
-      The principal must start with 'HTTP/' as per the Kerberos HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.authentication.kerberos.name.rules</name>
-    <value>DEFAULT</value>
-    <description>
-      Rules used to resolve Kerberos principal names.
-    </description>
-  </property>
-
-  <!-- Authentication cookie signature source -->
-
-  <property>
-    <name>hadoop.kms.authentication.signer.secret.provider</name>
-    <value>random</value>
-    <description>
-      Indicates how the secret to sign the authentication cookies will be
-      stored. Options are 'random' (default), 'string' and 'zookeeper'.
-      If using a setup with multiple KMS instances, 'zookeeper' should be used.
-    </description>
-  </property>
-
-  <!-- Configuration for 'zookeeper' authentication cookie signature source -->
-
-  <property>
-    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.path</name>
-    <value>/hadoop-kms/hadoop-auth-signature-secret</value>
-    <description>
-      The Zookeeper ZNode path where the KMS instances will store and retrieve
-      the secret from.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string</name>
-    <value>#HOSTNAME#:#PORT#,...</value>
-    <description>
-      The Zookeeper connection string, a list of hostnames and port comma
-      separated.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.auth.type</name>
-    <value>kerberos</value>
-    <description>
-      The Zookeeper authentication type, 'none' or 'sasl' (Kerberos).
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.keytab</name>
-    <value>/etc/hadoop/conf/kms.keytab</value>
-    <description>
-      The absolute path for the Kerberos keytab with the credentials to
-      connect to Zookeeper.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.principal</name>
-    <value>kms/#HOSTNAME#</value>
-    <description>
-      The Kerberos service principal used to connect to Zookeeper.
-    </description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-test/conf/log4j.properties
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/log4j.properties b/contrib/hawq-docker/centos7-docker/hawq-test/conf/log4j.properties
deleted file mode 100644
index c901ab1..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-test/conf/log4j.properties
+++ /dev/null
@@ -1,291 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshold=ALL
-
-# Null Appender
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Rolling File Appender - cap space usage at 5gb.
-#
-hadoop.log.maxfilesize=256MB
-hadoop.log.maxbackupindex=20
-log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
-log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
-
-log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-# HDFS block state change log from block manager
-#
-# Uncomment the following to suppress normal block state change
-# messages from BlockManager in NameNode.
-#log4j.logger.BlockStateChange=WARN
-
-#
-#Security appender
-#
-hadoop.security.logger=INFO,NullAppender
-hadoop.security.log.maxfilesize=256MB
-hadoop.security.log.maxbackupindex=20
-log4j.category.SecurityLogger=${hadoop.security.logger}
-hadoop.security.log.file=SecurityAuth-${user.name}.audit
-log4j.appender.RFAS=org.apache.log4j.RollingFileAppender 
-log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
-log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
-
-#
-# Daily Rolling Security appender
-#
-log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender 
-log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
-
-#
-# hadoop configuration logging
-#
-
-# Uncomment the following line to turn off configuration deprecation warnings.
-# log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
-
-#
-# hdfs audit logging
-#
-hdfs.audit.logger=INFO,NullAppender
-hdfs.audit.log.maxfilesize=256MB
-hdfs.audit.log.maxbackupindex=20
-log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
-log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
-log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
-log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
-log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
-log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
-
-#
-# NameNode metrics logging.
-# The default is to retain two namenode-metrics.log files up to 64MB each.
-#
-namenode.metrics.logger=INFO,NullAppender
-log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}
-log4j.additivity.NameNodeMetricsLog=false
-log4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender
-log4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log
-log4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout
-log4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
-log4j.appender.NNMETRICSRFA.MaxBackupIndex=1
-log4j.appender.NNMETRICSRFA.MaxFileSize=64MB
-
-#
-# mapred audit logging
-#
-mapred.audit.logger=INFO,NullAppender
-mapred.audit.log.maxfilesize=256MB
-mapred.audit.log.maxbackupindex=20
-log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
-log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
-log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender
-log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
-log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}
-log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}
-
-# Custom Logging levels
-
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=DEBUG
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-# AWS SDK & S3A FileSystem
-log4j.logger.com.amazonaws=ERROR
-log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
-log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-
-#
-# Job Summary Appender 
-#
-# Use following logger to send summary to separate file defined by 
-# hadoop.mapreduce.jobsummary.log.file :
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-# 
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-hadoop.mapreduce.jobsummary.log.maxfilesize=256MB
-hadoop.mapreduce.jobsummary.log.maxbackupindex=20
-log4j.appender.JSA=org.apache.log4j.RollingFileAppender
-log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
-log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize}
-log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex}
-log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
-log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
-
-#
-# Yarn ResourceManager Application Summary Log 
-#
-# Set the ResourceManager summary log filename
-yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
-# Set the ResourceManager summary log level and appender
-yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
-#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
-
-# To enable AppSummaryLogging for the RM, 
-# set yarn.server.resourcemanager.appsummary.logger to 
-# <LEVEL>,RMSUMMARY in hadoop-env.sh
-
-# Appender for ResourceManager Application Summary Log
-# Requires the following properties to be set
-#    - hadoop.log.dir (Hadoop Log directory)
-#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
-#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
-
-log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
-log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
-log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
-log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
-log4j.appender.RMSUMMARY.MaxFileSize=256MB
-log4j.appender.RMSUMMARY.MaxBackupIndex=20
-log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
-log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-
-# HS audit log configs
-#mapreduce.hs.audit.logger=INFO,HSAUDIT
-#log4j.logger.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=${mapreduce.hs.audit.logger}
-#log4j.additivity.org.apache.hadoop.mapreduce.v2.hs.HSAuditLogger=false
-#log4j.appender.HSAUDIT=org.apache.log4j.DailyRollingFileAppender
-#log4j.appender.HSAUDIT.File=${hadoop.log.dir}/hs-audit.log
-#log4j.appender.HSAUDIT.layout=org.apache.log4j.PatternLayout
-#log4j.appender.HSAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-#log4j.appender.HSAUDIT.DatePattern=.yyyy-MM-dd
-
-# Http Server Request Logs
-#log4j.logger.http.requests.namenode=INFO,namenoderequestlog
-#log4j.appender.namenoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
-#log4j.appender.namenoderequestlog.Filename=${hadoop.log.dir}/jetty-namenode-yyyy_mm_dd.log
-#log4j.appender.namenoderequestlog.RetainDays=3
-
-#log4j.logger.http.requests.datanode=INFO,datanoderequestlog
-#log4j.appender.datanoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender
-#log4j.appender.datanoderequestlog.Filename=${hadoop.log.dir}/jetty-datanode-yyyy_mm_dd.log
-#log4j.appender.datanoderequestlog.RetainDays=3
-
-#log4j.logger.http.requests.resourcemanager=INFO,resourcemanagerrequestlog
-#log4j.appender.resourcemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
-#log4j.appender.resourcemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-resourcemanager-yyyy_mm_dd.log
-#log4j.appender.resourcemanagerrequestlog.RetainDays=3
-
-#log4j.logger.http.requests.jobhistory=INFO,jobhistoryrequestlog
-#log4j.appender.jobhistoryrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
-#log4j.appender.jobhistoryrequestlog.Filename=${hadoop.log.dir}/jetty-jobhistory-yyyy_mm_dd.log
-#log4j.appender.jobhistoryrequestlog.RetainDays=3
-
-#log4j.logger.http.requests.nodemanager=INFO,nodemanagerrequestlog
-#log4j.appender.nodemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
-#log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-yyyy_mm_dd.log
-#log4j.appender.nodemanagerrequestlog.RetainDays=3
-
-# Appender for viewing information for errors and warnings
-yarn.ewma.cleanupInterval=300
-yarn.ewma.messageAgeLimitSeconds=86400
-yarn.ewma.maxUniqueMessages=250
-log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
-log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
-log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
-log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-env.cmd
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-env.cmd b/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-env.cmd
deleted file mode 100644
index 0d39526..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-env.cmd
+++ /dev/null
@@ -1,20 +0,0 @@
-@echo off
-@rem Licensed to the Apache Software Foundation (ASF) under one or more
-@rem contributor license agreements.  See the NOTICE file distributed with
-@rem this work for additional information regarding copyright ownership.
-@rem The ASF licenses this file to You under the Apache License, Version 2.0
-@rem (the "License"); you may not use this file except in compliance with
-@rem the License.  You may obtain a copy of the License at
-@rem
-@rem     http://www.apache.org/licenses/LICENSE-2.0
-@rem
-@rem Unless required by applicable law or agreed to in writing, software
-@rem distributed under the License is distributed on an "AS IS" BASIS,
-@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-@rem See the License for the specific language governing permissions and
-@rem limitations under the License.
-
-set HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000
-
-set HADOOP_MAPRED_ROOT_LOGGER=%HADOOP_LOGLEVEL%,RFA
-

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-env.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-env.sh b/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-env.sh
deleted file mode 100644
index 6be1e27..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-env.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
-
-export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000
-
-export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA
-
-#export HADOOP_JOB_HISTORYSERVER_OPTS=
-#export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.
-#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.
-#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.
-#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default
-#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-queues.xml.template
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-queues.xml.template b/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-queues.xml.template
deleted file mode 100644
index ce6cd20..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-queues.xml.template
+++ /dev/null
@@ -1,92 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<!-- This is the template for queue configuration. The format supports nesting of
-     queues within queues - a feature called hierarchical queues. All queues are
-     defined within the 'queues' tag which is the top level element for this
-     XML document. The queue acls configured here for different queues are
-     checked for authorization only if the configuration property
-     mapreduce.cluster.acls.enabled is set to true. -->
-<queues>
-
-  <!-- Configuration for a queue is specified by defining a 'queue' element. -->
-  <queue>
-
-    <!-- Name of a queue. Queue name cannot contain a ':'  -->
-    <name>default</name>
-
-    <!-- properties for a queue, typically used by schedulers,
-    can be defined here -->
-    <properties>
-    </properties>
-
-	<!-- State of the queue. If running, the queue will accept new jobs.
-         If stopped, the queue will not accept new jobs. -->
-    <state>running</state>
-
-    <!-- Specifies the ACLs to check for submitting jobs to this queue.
-         If set to '*', it allows all users to submit jobs to the queue.
-         If set to ' '(i.e. space), no user will be allowed to do this
-         operation. The default value for any queue acl is ' '.
-         For specifying a list of users and groups the format to use is
-         user1,user2 group1,group2
-
-         It is only used if authorization is enabled in Map/Reduce by setting
-         the configuration property mapreduce.cluster.acls.enabled to true.
-
-         Irrespective of this ACL configuration, the user who started the
-         cluster and cluster administrators configured via
-         mapreduce.cluster.administrators can do this operation. -->
-    <acl-submit-job> </acl-submit-job>
-
-    <!-- Specifies the ACLs to check for viewing and modifying jobs in this
-         queue. Modifications include killing jobs, tasks of jobs or changing
-         priorities.
-         If set to '*', it allows all users to view, modify jobs of the queue.
-         If set to ' '(i.e. space), no user will be allowed to do this
-         operation.
-         For specifying a list of users and groups the format to use is
-         user1,user2 group1,group2
-
-         It is only used if authorization is enabled in Map/Reduce by setting
-         the configuration property mapreduce.cluster.acls.enabled to true.
-
-         Irrespective of this ACL configuration, the user who started the
-         cluster  and cluster administrators configured via
-         mapreduce.cluster.administrators can do the above operations on all
-         the jobs in all the queues. The job owner can do all the above
-         operations on his/her job irrespective of this ACL configuration. -->
-    <acl-administer-jobs> </acl-administer-jobs>
-  </queue>
-
-  <!-- Here is a sample of a hierarchical queue configuration
-       where q2 is a child of q1. In this example, q2 is a leaf level
-       queue as it has no queues configured within it. Currently, ACLs
-       and state are only supported for the leaf level queues.
-       Note also the usage of properties for the queue q2.
-  <queue>
-    <name>q1</name>
-    <queue>
-      <name>q2</name>
-      <properties>
-        <property key="capacity" value="20"/>
-        <property key="user-limit" value="30"/>
-      </properties>
-    </queue>
-  </queue>
- -->
-</queues>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-site.xml.template
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-site.xml.template b/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-site.xml.template
deleted file mode 100644
index 761c352..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-test/conf/mapred-site.xml.template
+++ /dev/null
@@ -1,21 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-test/conf/slaves
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/slaves b/contrib/hawq-docker/centos7-docker/hawq-test/conf/slaves
deleted file mode 100644
index 2fbb50c..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-test/conf/slaves
+++ /dev/null
@@ -1 +0,0 @@
-localhost

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-test/conf/ssl-client.xml.example
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/ssl-client.xml.example b/contrib/hawq-docker/centos7-docker/hawq-test/conf/ssl-client.xml.example
deleted file mode 100644
index a50dce4..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-test/conf/ssl-client.xml.example
+++ /dev/null
@@ -1,80 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-
-<property>
-  <name>ssl.client.truststore.location</name>
-  <value></value>
-  <description>Truststore to be used by clients like distcp. Must be
-  specified.
-  </description>
-</property>
-
-<property>
-  <name>ssl.client.truststore.password</name>
-  <value></value>
-  <description>Optional. Default value is "".
-  </description>
-</property>
-
-<property>
-  <name>ssl.client.truststore.type</name>
-  <value>jks</value>
-  <description>Optional. The keystore file format, default value is "jks".
-  </description>
-</property>
-
-<property>
-  <name>ssl.client.truststore.reload.interval</name>
-  <value>10000</value>
-  <description>Truststore reload check interval, in milliseconds.
-  Default value is 10000 (10 seconds).
-  </description>
-</property>
-
-<property>
-  <name>ssl.client.keystore.location</name>
-  <value></value>
-  <description>Keystore to be used by clients like distcp. Must be
-  specified.
-  </description>
-</property>
-
-<property>
-  <name>ssl.client.keystore.password</name>
-  <value></value>
-  <description>Optional. Default value is "".
-  </description>
-</property>
-
-<property>
-  <name>ssl.client.keystore.keypassword</name>
-  <value></value>
-  <description>Optional. Default value is "".
-  </description>
-</property>
-
-<property>
-  <name>ssl.client.keystore.type</name>
-  <value>jks</value>
-  <description>Optional. The keystore file format, default value is "jks".
-  </description>
-</property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-test/conf/ssl-server.xml.example
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/ssl-server.xml.example b/contrib/hawq-docker/centos7-docker/hawq-test/conf/ssl-server.xml.example
deleted file mode 100644
index 02d300c..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-test/conf/ssl-server.xml.example
+++ /dev/null
@@ -1,78 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-
-<property>
-  <name>ssl.server.truststore.location</name>
-  <value></value>
-  <description>Truststore to be used by NN and DN. Must be specified.
-  </description>
-</property>
-
-<property>
-  <name>ssl.server.truststore.password</name>
-  <value></value>
-  <description>Optional. Default value is "".
-  </description>
-</property>
-
-<property>
-  <name>ssl.server.truststore.type</name>
-  <value>jks</value>
-  <description>Optional. The keystore file format, default value is "jks".
-  </description>
-</property>
-
-<property>
-  <name>ssl.server.truststore.reload.interval</name>
-  <value>10000</value>
-  <description>Truststore reload check interval, in milliseconds.
-  Default value is 10000 (10 seconds).
-  </description>
-</property>
-
-<property>
-  <name>ssl.server.keystore.location</name>
-  <value></value>
-  <description>Keystore to be used by NN and DN. Must be specified.
-  </description>
-</property>
-
-<property>
-  <name>ssl.server.keystore.password</name>
-  <value></value>
-  <description>Must be specified.
-  </description>
-</property>
-
-<property>
-  <name>ssl.server.keystore.keypassword</name>
-  <value></value>
-  <description>Must be specified.
-  </description>
-</property>
-
-<property>
-  <name>ssl.server.keystore.type</name>
-  <value>jks</value>
-  <description>Optional. The keystore file format, default value is "jks".
-  </description>
-</property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-test/conf/yarn-env.cmd
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/yarn-env.cmd b/contrib/hawq-docker/centos7-docker/hawq-test/conf/yarn-env.cmd
deleted file mode 100644
index 74da35b..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-test/conf/yarn-env.cmd
+++ /dev/null
@@ -1,60 +0,0 @@
-@echo off
-@rem Licensed to the Apache Software Foundation (ASF) under one or more
-@rem contributor license agreements.  See the NOTICE file distributed with
-@rem this work for additional information regarding copyright ownership.
-@rem The ASF licenses this file to You under the Apache License, Version 2.0
-@rem (the "License"); you may not use this file except in compliance with
-@rem the License.  You may obtain a copy of the License at
-@rem
-@rem     http://www.apache.org/licenses/LICENSE-2.0
-@rem
-@rem Unless required by applicable law or agreed to in writing, software
-@rem distributed under the License is distributed on an "AS IS" BASIS,
-@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-@rem See the License for the specific language governing permissions and
-@rem limitations under the License.
-
-@rem User for YARN daemons
-if not defined HADOOP_YARN_USER (
-  set HADOOP_YARN_USER=%yarn%
-)
-
-if not defined YARN_CONF_DIR (
-  set YARN_CONF_DIR=%HADOOP_YARN_HOME%\conf
-)
-
-if defined YARN_HEAPSIZE (
-  @rem echo run with Java heapsize %YARN_HEAPSIZE%
-  set JAVA_HEAP_MAX=-Xmx%YARN_HEAPSIZE%m
-)
-
-if not defined YARN_LOG_DIR (
-  set YARN_LOG_DIR=%HADOOP_YARN_HOME%\logs
-)
-
-if not defined YARN_LOGFILE (
-  set YARN_LOGFILE=yarn.log
-)
-
-@rem default policy file for service-level authorization
-if not defined YARN_POLICYFILE (
-  set YARN_POLICYFILE=hadoop-policy.xml
-)
-
-if not defined YARN_ROOT_LOGGER (
-  set YARN_ROOT_LOGGER=%HADOOP_LOGLEVEL%,console
-)
-
-set YARN_OPTS=%YARN_OPTS% -Dhadoop.log.dir=%YARN_LOG_DIR%
-set YARN_OPTS=%YARN_OPTS% -Dyarn.log.dir=%YARN_LOG_DIR%
-set YARN_OPTS=%YARN_OPTS% -Dhadoop.log.file=%YARN_LOGFILE%
-set YARN_OPTS=%YARN_OPTS% -Dyarn.log.file=%YARN_LOGFILE%
-set YARN_OPTS=%YARN_OPTS% -Dyarn.home.dir=%HADOOP_YARN_HOME%
-set YARN_OPTS=%YARN_OPTS% -Dyarn.id.str=%YARN_IDENT_STRING%
-set YARN_OPTS=%YARN_OPTS% -Dhadoop.home.dir=%HADOOP_YARN_HOME%
-set YARN_OPTS=%YARN_OPTS% -Dhadoop.root.logger=%YARN_ROOT_LOGGER%
-set YARN_OPTS=%YARN_OPTS% -Dyarn.root.logger=%YARN_ROOT_LOGGER%
-if defined JAVA_LIBRARY_PATH (
-  set YARN_OPTS=%YARN_OPTS% -Djava.library.path=%JAVA_LIBRARY_PATH%
-)
-set YARN_OPTS=%YARN_OPTS% -Dyarn.policy.file=%YARN_POLICYFILE%
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-test/entrypoint.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/entrypoint.sh b/contrib/hawq-docker/centos7-docker/hawq-test/entrypoint.sh
deleted file mode 100755
index abdc508..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-test/entrypoint.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-if [ -z "${NAMENODE}" ]; then
-  export NAMENODE=${HOSTNAME}
-fi
-
-if [ ! -f /etc/profile.d/hadoop.sh ]; then
-  echo '#!/bin/bash' | sudo tee /etc/profile.d/hadoop.sh
-  echo "export NAMENODE=${NAMENODE}" | sudo tee -a /etc/profile.d/hadoop.sh
-  sudo chmod a+x /etc/profile.d/hadoop.sh
-fi
-
-sudo start-hdfs.sh
-sudo sysctl -p
-
-exec "$@"

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos7-docker/hawq-test/start-hdfs.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/start-hdfs.sh b/contrib/hawq-docker/centos7-docker/hawq-test/start-hdfs.sh
deleted file mode 100755
index f39200d..0000000
--- a/contrib/hawq-docker/centos7-docker/hawq-test/start-hdfs.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-/usr/sbin/sshd
-
-if [ -f /etc/profile.d/hadoop.sh ]; then
-  . /etc/profile.d/hadoop.sh
-fi
-
-if [ "${NAMENODE}" == "${HOSTNAME}" ]; then
-  if [ ! -d /tmp/hdfs/name/current ]; then
-    su -l hdfs -c "hdfs namenode -format"
-  fi
-  
-  if [ -z "`ps aux | grep org.apache.hadoop.hdfs.server.namenode.NameNode | grep -v grep`" ]; then
-    su -l hdfs -c "hadoop-daemon.sh start namenode"
-  fi
-else
-  if [ -z "`ps aux | grep org.apache.hadoop.hdfs.server.datanode.DataNode | grep -v grep`" ]; then
-    su -l hdfs -c "hadoop-daemon.sh start datanode"
-  fi
-fi
-



[25/50] [abbrv] incubator-hawq git commit: HAWQ-1243. Add suffix name for ranger restful service.

Posted by es...@apache.org.
HAWQ-1243. Add suffix name for ranger restful service.


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/ec7b4d9e
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/ec7b4d9e
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/ec7b4d9e

Branch: refs/heads/2.1.0.0-incubating
Commit: ec7b4d9e96834d86a1a7196c9999199997e5068c
Parents: cf54c41
Author: hubertzhang <hu...@apache.org>
Authored: Mon Jan 16 17:13:38 2017 +0800
Committer: hubertzhang <hu...@apache.org>
Committed: Mon Jan 16 17:13:38 2017 +0800

----------------------------------------------------------------------
 src/backend/utils/misc/guc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/ec7b4d9e/src/backend/utils/misc/guc.c
----------------------------------------------------------------------
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 21d705a..e87d514 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -8202,7 +8202,7 @@ static struct config_string ConfigureNamesString[] =
       NULL
     },
     &rps_addr_suffix,
-    "hawq", NULL, NULL
+    "rps", NULL, NULL
   },
 
 	{


[49/50] [abbrv] incubator-hawq git commit: HAWQ-1308. Fixed Javadoc warnings.

Posted by es...@apache.org.
HAWQ-1308. Fixed Javadoc warnings.


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/7d02472b
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/7d02472b
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/7d02472b

Branch: refs/heads/2.1.0.0-incubating
Commit: 7d02472b82e841254d63215c9acdfc51a2ecd7e3
Parents: f6452d2
Author: Oleksandr Diachenko <od...@pivotal.io>
Authored: Thu Feb 2 14:43:45 2017 -0800
Committer: Oleksandr Diachenko <od...@pivotal.io>
Committed: Thu Feb 2 14:44:19 2017 -0800

----------------------------------------------------------------------
 .../java/org/apache/hawq/pxf/api/OutputFormat.java   |  3 ++-
 .../pxf/plugins/hive/utilities/HiveUtilities.java    | 15 ++++++++-------
 2 files changed, 10 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7d02472b/pxf/pxf-api/src/main/java/org/apache/hawq/pxf/api/OutputFormat.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-api/src/main/java/org/apache/hawq/pxf/api/OutputFormat.java b/pxf/pxf-api/src/main/java/org/apache/hawq/pxf/api/OutputFormat.java
index 89c4b30..4d2d806 100644
--- a/pxf/pxf-api/src/main/java/org/apache/hawq/pxf/api/OutputFormat.java
+++ b/pxf/pxf-api/src/main/java/org/apache/hawq/pxf/api/OutputFormat.java
@@ -44,7 +44,8 @@ public enum OutputFormat {
 
     /**
      * Looks up output format for given class name if it exists.
-     *
+     * 
+     * @param className class name implementing certain output format
      * @throws UnsupportedTypeException if output format with given class wasn't found
      * @return an output format with given class name
      */

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/7d02472b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/utilities/HiveUtilities.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/utilities/HiveUtilities.java b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/utilities/HiveUtilities.java
index 37f4ac2..3328c9f 100644
--- a/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/utilities/HiveUtilities.java
+++ b/pxf/pxf-hive/src/main/java/org/apache/hawq/pxf/plugins/hive/utilities/HiveUtilities.java
@@ -455,11 +455,11 @@ public class HiveUtilities {
     /**
      * The method which serializes fragment-related attributes, needed for reading and resolution to string
      *
-     * @param fragmenterClassName
-     * @param partData
-     * @param filterInFragmenter
+     * @param fragmenterClassName fragmenter class name
+     * @param partData partition data
+     * @param filterInFragmenter whether filtering was done in fragmenter
      * @return serialized representation of fragment-related attributes
-     * @throws Exception
+     * @throws Exception when an error occurs during serialization
      */
     @SuppressWarnings("unchecked")
     public static byte[] makeUserData(String fragmenterClassName, HiveTablePartition partData, boolean filterInFragmenter) throws Exception {
@@ -494,7 +494,7 @@ public class HiveUtilities {
      * @param input input data
      * @param supportedSerdes list of allowed serdes in current context
      * @return instance of HiveUserData class
-     * @throws UserDataException
+     * @throws UserDataException when an incorrect number of tokens is received in the Hive user data
      */
     public static HiveUserData parseHiveUserData(InputData input, PXF_HIVE_SERDES... supportedSerdes) throws UserDataException{
         String userData = new String(input.getFragmentUserData());
@@ -610,10 +610,11 @@ public class HiveUtilities {
     /**
      * Creates an instance of a given serde type
      *
-     * @param serdeType
-     * @param allowedSerdes
+     * @param serdeType SerDe type
+     * @param allowedSerdes allowed serdes in current context
      * @return instance of a given serde
      * @throws UnsupportedTypeException if given serde is not allowed in current context
+     * @throws Exception if any other error occurs while creating the SerDe instance
      */
     @SuppressWarnings("deprecation")
     public static SerDe createDeserializer(PXF_HIVE_SERDES serdeType, PXF_HIVE_SERDES... allowedSerdes) throws Exception{


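A quick way to confirm fixes like the two above is to regenerate the Javadoc for the touched modules and grep for warnings. The invocation below is an assumption about the PXF build layout (a Gradle build exposing the stock `javadoc` task), not something taken from this commit.

```bash
# Assumption: PXF is built with Gradle and the standard `javadoc` task is available.
cd pxf
./gradlew javadoc 2>&1 | grep -i "warning" || echo "no Javadoc warnings"
```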
[47/50] [abbrv] incubator-hawq git commit: HAWQ-1306. Removed links in Javadoc referencing pxf-service from pxf-api.

Posted by es...@apache.org.
HAWQ-1306. Removed links in Javadoc referencing pxf-service from pxf-api.


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/42c1cc13
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/42c1cc13
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/42c1cc13

Branch: refs/heads/2.1.0.0-incubating
Commit: 42c1cc1310c5eadf69c1882ed0f532a057bc1505
Parents: 524e2e5
Author: Oleksandr Diachenko <od...@pivotal.io>
Authored: Wed Feb 1 16:49:23 2017 -0800
Committer: Oleksandr Diachenko <od...@pivotal.io>
Committed: Wed Feb 1 16:49:23 2017 -0800

----------------------------------------------------------------------
 .../src/main/java/org/apache/hawq/pxf/api/OutputFormat.java        | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/42c1cc13/pxf/pxf-api/src/main/java/org/apache/hawq/pxf/api/OutputFormat.java
----------------------------------------------------------------------
diff --git a/pxf/pxf-api/src/main/java/org/apache/hawq/pxf/api/OutputFormat.java b/pxf/pxf-api/src/main/java/org/apache/hawq/pxf/api/OutputFormat.java
index 565db13..89c4b30 100644
--- a/pxf/pxf-api/src/main/java/org/apache/hawq/pxf/api/OutputFormat.java
+++ b/pxf/pxf-api/src/main/java/org/apache/hawq/pxf/api/OutputFormat.java
@@ -21,7 +21,7 @@ package org.apache.hawq.pxf.api;
 
 
 /**
- * PXF supported output formats: {@link org.apache.hawq.pxf.service.io.Text} and {@link org.apache.hawq.pxf.service.io.GPDBWritable}
+ * PXF supported output formats: an enum of the supported serialization classes
  */
 public enum OutputFormat {
     TEXT("org.apache.hawq.pxf.service.io.Text"),


[26/50] [abbrv] incubator-hawq git commit: HAWQ-1248. Merge Dockerfiles for HAWQ Dev into HAWQ code base

Posted by es...@apache.org.
HAWQ-1248. Merge Dockerfiles for HAWQ Dev into HAWQ code base


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/368dbc9e
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/368dbc9e
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/368dbc9e

Branch: refs/heads/2.1.0.0-incubating
Commit: 368dbc9e64a2e62061ea47a0b9c7b15589dad457
Parents: ec7b4d9
Author: Richard Guo <gu...@gmail.com>
Authored: Tue Jan 3 17:12:59 2017 +0800
Committer: Ruilong Huo <rh...@pivotal.io>
Committed: Tue Jan 17 10:57:02 2017 +0800

----------------------------------------------------------------------
 contrib/hawq-docker/Makefile                    | 222 +++++++++++++++++++
 contrib/hawq-docker/README.md                   |  97 ++++++++
 .../centos6-docker/hawq-dev/Dockerfile          | 123 ++++++++++
 .../centos6-docker/hawq-test/Dockerfile         |  40 ++++
 .../centos6-docker/hawq-test/conf/core-site.xml |  24 ++
 .../centos6-docker/hawq-test/conf/hadoop-env.sh | 110 +++++++++
 .../centos6-docker/hawq-test/entrypoint.sh      |  34 +++
 .../centos6-docker/hawq-test/start-hdfs.sh      |  39 ++++
 .../centos7-docker/hawq-dev/Dockerfile          |  75 +++++++
 .../centos7-docker/hawq-test/Dockerfile         |  40 ++++
 .../centos7-docker/hawq-test/conf/core-site.xml |  24 ++
 .../centos7-docker/hawq-test/conf/hadoop-env.sh | 110 +++++++++
 .../centos7-docker/hawq-test/entrypoint.sh      |  33 +++
 .../centos7-docker/hawq-test/start-hdfs.sh      |  39 ++++
 14 files changed, 1010 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/Makefile
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/Makefile b/contrib/hawq-docker/Makefile
new file mode 100644
index 0000000..120ebe2
--- /dev/null
+++ b/contrib/hawq-docker/Makefile
@@ -0,0 +1,222 @@
+#!/usr/bin/make all
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+THIS_MAKEFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
+TOP_DIR := $(abspath $(dir ${THIS_MAKEFILE_PATH}))
+NDATANODES := 3
+CUR_DATANODE := 1
+OS_VERSION := centos7
+# Do not use underscore "_" in CLUSTER_ID
+CLUSTER_ID := $(OS_VERSION)
+# Mount this local directory to /data in the data container and share it with other containers
+LOCAL := 
+# networks used in docker
+NETWORK := $(CLUSTER_ID)_hawq_network
+
+all: 
+	@echo " Usage:"
+	@echo "    To setup a build and test environment:         make run"
+	@echo "    To start all containers:                       make start"
+	@echo "    To stop all containers:                        make stop"
+	@echo "    To remove hdfs containers:                     make clean"
+	@echo "    To remove all containers:                      make distclean"
+	@echo ""
+	@echo "    To build images locally:                       make build"
+	@echo "    To pull latest images:                         make pull"
+
+build:
+	@make -f $(THIS_MAKEFILE_PATH) build-hawq-dev-$(OS_VERSION)
+	@make -f $(THIS_MAKEFILE_PATH) build-hawq-test-$(OS_VERSION)
+	@echo "Build Images Done!"
+
+build-hawq-dev-$(OS_VERSION): $(TOP_DIR)/$(OS_VERSION)-docker/hawq-dev/Dockerfile
+	@echo build hawq-dev:$(OS_VERSION) image
+	docker build -t hawq/hawq-dev:$(OS_VERSION) $(TOP_DIR)/$(OS_VERSION)-docker/hawq-dev/
+
+build-hawq-test-$(OS_VERSION): $(TOP_DIR)/$(OS_VERSION)-docker/hawq-test/Dockerfile
+	@echo build hawq-test:$(OS_VERSION) image
+	docker build -t hawq/hawq-test:$(OS_VERSION) $(TOP_DIR)/$(OS_VERSION)-docker/hawq-test/
+
+create-data-container:
+	@echo create ${CLUSTER_ID}-data container
+	@if [ ! -z "$(LOCAL)" -a ! -d "$(LOCAL)" ]; then \
+		echo "LOCAL must be set to a directory!"; \
+		exit 1; \
+	fi
+	@if [ -z "`docker ps -a --filter="name=${CLUSTER_ID}-data$$" | grep -v CONTAINER`" ]; then \
+		if [ -z "$(LOCAL)" ]; then \
+			docker create -v /data --name=${CLUSTER_ID}-data hawq/hawq-dev:$(OS_VERSION) /bin/true; \
+		else \
+			docker create -v $(LOCAL):/data --name=${CLUSTER_ID}-data hawq/hawq-dev:$(OS_VERSION) /bin/true; \
+		fi \
+	else \
+		echo "${CLUSTER_ID}-data container already exist!"; \
+	fi
+
+run:
+	@if [ -z "`docker network ls 2>/dev/null`" ]; then \
+ 		make -f $(THIS_MAKEFILE_PATH) NETWORK=default create-data-container && \
+		make -f $(THIS_MAKEFILE_PATH) NETWORK=default run-hdfs; \
+	else \
+		if [ -z "`docker network ls 2>/dev/null | grep $(NETWORK)`" ]; then \
+			echo create network $(NETWORK) && \
+			docker network create --driver bridge $(NETWORK); \
+		fi && \
+		make -f $(THIS_MAKEFILE_PATH) create-data-container && \
+		make -f $(THIS_MAKEFILE_PATH) run-hdfs; \
+	fi
+
+run-hdfs:
+	@make -f $(THIS_MAKEFILE_PATH) run-namenode-container
+	@i=1; \
+	while [ $$i -le $(NDATANODES) ] ; do \
+		make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i run-datanode-container; \
+		i=$$((i+1)); \
+	done
+	@echo "HAWQ Environment Setup Done!"
+	@echo 'run "docker exec -it ${CLUSTER_ID}-namenode bash" to attach to ${CLUSTER_ID}-namenode node'
+
+run-namenode-container:
+	@echo "run ${CLUSTER_ID}-namenode container"
+	@if [ -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
+		docker run --privileged -itd --net=$(NETWORK) --hostname=${CLUSTER_ID}-namenode --name=${CLUSTER_ID}-namenode \
+			--volumes-from ${CLUSTER_ID}-data hawq/hawq-test:$(OS_VERSION); \
+	else \
+		echo "${CLUSTER_ID}-namenode container already exist!"; \
+	fi
+
+run-datanode-container:
+	@echo "run ${CLUSTER_ID}-datanode$(CUR_DATANODE) container"
+	@if [ -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \
+		docker run --privileged -itd --net=$(NETWORK) --hostname=${CLUSTER_ID}-datanode$(CUR_DATANODE) \
+			--name=${CLUSTER_ID}-datanode$(CUR_DATANODE) -e NAMENODE=${CLUSTER_ID}-namenode \
+			--volumes-from ${CLUSTER_ID}-data hawq/hawq-test:$(OS_VERSION); \
+	else \
+		echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container already exist!"; \
+	fi
+
+start:
+	@make -f $(THIS_MAKEFILE_PATH) start-hdfs
+	@echo 'run "docker exec -it ${CLUSTER_ID}-namenode bash" to attach to ${CLUSTER_ID}-namenode node'
+
+start-hdfs:
+	@make -f $(THIS_MAKEFILE_PATH) start-namenode-container
+	@i=1;\
+	while [ $$i -le $(NDATANODES) ] ; do \
+		make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i start-datanode-container; \
+		i=$$((i+1)); \
+	done
+	@echo "Start All Containers Done!"
+
+start-namenode-container:
+	@echo "start ${CLUSTER_ID}-namenode container"
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
+		docker start ${CLUSTER_ID}-namenode; \
+	else \
+		echo "${CLUSTER_ID}-namenode container does not exist!" && exit 1; \
+	fi
+
+start-datanode-container:
+	@echo "start ${CLUSTER_ID}-datanode$(CUR_DATANODE) container"
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \
+		docker start ${CLUSTER_ID}-datanode$(CUR_DATANODE); \
+	else \
+		echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container does not exist!" && exit 1; \
+	fi
+
+stop:
+	@make -f $(THIS_MAKEFILE_PATH) stop-hdfs
+
+stop-hdfs:
+	@make -f $(THIS_MAKEFILE_PATH) stop-namenode-container
+	@i=1; \
+	while [ $$i -le $(NDATANODES) ] ; do \
+		make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i stop-datanode-container; \
+		i=$$((i+1)); \
+	done
+	@echo "Stop All Containers Done!"
+
+stop-namenode-container:
+	@echo "stop ${CLUSTER_ID}-namenode container"
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
+		docker stop -t 0 ${CLUSTER_ID}-namenode; \
+	else \
+		echo "${CLUSTER_ID}-namenode container does not exist!"; \
+	fi
+
+stop-datanode-container:
+	@echo "stop ${CLUSTER_ID}-datanode$(CUR_DATANODE) container"
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \
+		docker stop -t 0 ${CLUSTER_ID}-datanode$(CUR_DATANODE); \
+	else \
+		echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container does not exist!"; \
+	fi
+
+remove-hdfs:
+	@make -f $(THIS_MAKEFILE_PATH) remove-namenode-container
+	@i=1; \
+	while [ $$i -le $(NDATANODES) ] ; do \
+		make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i remove-datanode-container; \
+		i=$$((i+1)); \
+	done
+	@echo "Remove HDFS Done!"
+
+remove-namenode-container:
+	@echo "make ${CLUSTER_ID}-namenode container"
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
+		docker rm -v ${CLUSTER_ID}-namenode; \
+	else \
+		echo "${CLUSTER_ID}-namenode container does not exist!"; \
+	fi
+
+remove-datanode-container:
+	@echo "make ${CLUSTER_ID}-datanode$(CUR_DATANODE) container"
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \
+		docker rm -v ${CLUSTER_ID}-datanode$(CUR_DATANODE); \
+	else \
+		echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container does not exist!"; \
+	fi
+
+remove-data:
+	@echo remove ${CLUSTER_ID}-data container
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-data" | grep -v CONTAINER`" ]; then \
+		docker rm -v ${CLUSTER_ID}-data; \
+	else \
+		echo "${CLUSTER_ID}-data container does not exist!"; \
+	fi
+
+pull:
+	@echo latest images
+	docker pull hawq/hawq-dev:$(OS_VERSION)
+	docker pull hawq/hawq-test:$(OS_VERSION)
+
+clean:
+	@make -f $(THIS_MAKEFILE_PATH) stop 2>&1 >/dev/null || true
+	@make -f $(THIS_MAKEFILE_PATH) remove-hdfs 2>&1 >/dev/null || true
+	@echo "Clean Done!"
+
+distclean:
+	@make -f $(THIS_MAKEFILE_PATH) stop 2>&1 >/dev/null || true
+	@make -f $(THIS_MAKEFILE_PATH) remove-hdfs 2>&1 >/dev/null || true
+	@make -f $(THIS_MAKEFILE_PATH) remove-data 2>&1 >/dev/null || true
+	@if [ ! -z "`docker network ls 2>/dev/null | grep $(NETWORK)`" ]; then \
+		echo remove network $(NETWORK); \
+		docker network rm $(NETWORK) 2>&1 >/dev/null || true; \
+	fi
+	@echo "Distclean Done!"

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/README.md
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/README.md b/contrib/hawq-docker/README.md
new file mode 100644
index 0000000..4adeaaf
--- /dev/null
+++ b/contrib/hawq-docker/README.md
@@ -0,0 +1,97 @@
+# hawq-docker
+
+hawq-docker is based on *wangzw's* repo *hawq-devel-env*. It provides the docker images and scripts that help Apache HAWQ developers set up a build and test environment with docker.
+
+Both CentOS 7 and CentOS 6 are supported.
+Change the variable **OS_VERSION** (:= centos7 OR centos6) in the Makefile to switch between CentOS 7 and CentOS 6.
+
+Take CentOS 7 as an example below.
+
+# Install docker
+* Follow the instructions to install docker:
+https://docs.docker.com/
+
+# Setup build and test environment
+* clone the HAWQ repository
+```
+git clone https://github.com/apache/incubator-hawq.git
+cd incubator-hawq/contrib/hawq-docker
+```
+* Get the docker images
+```
+  make pull (recommended)
+OR
+  make build
+``` 
+(`make pull` pulls prebuilt docker images from Docker Hub, while `make build` builds the images locally. In general, `make pull` is faster than `make build`.)
+* set up a 5-node virtual cluster for Apache HAWQ build and test.
+```
+make run
+```
+Now let's have a look at what we created.
+```
+[root@localhost hawq-docker]# docker ps -a
+CONTAINER ID        IMAGE                          COMMAND                CREATED             STATUS              PORTS               NAMES
+382b2b3360d1        hawq/hawq-test:centos7   "entrypoint.sh bash"   2 minutes ago       Up 2 minutes                            centos7-datanode3
+86513c331d45        hawq/hawq-test:centos7   "entrypoint.sh bash"   2 minutes ago       Up 2 minutes                            centos7-datanode2
+c0ab10e46e4a        hawq/hawq-test:centos7   "entrypoint.sh bash"   2 minutes ago       Up 2 minutes                            centos7-datanode1
+e27beea63953        hawq/hawq-test:centos7   "entrypoint.sh bash"   2 minutes ago       Up 2 minutes                            centos7-namenode
+1f986959bd04        hawq/hawq-dev:centos7    "/bin/true"            2 minutes ago       Created                                 centos7-data
+```
+**centos7-data** is a data container; it is mounted at /data on all other containers to provide shared storage for the cluster.
+
+# Build and Test Apache HAWQ
+* attach to namenode
+```
+docker exec -it centos7-namenode bash
+```
+* check that HDFS is working well
+```
+sudo -u hdfs hdfs dfsadmin -report
+```
+* clone the Apache HAWQ code into the /data directory
+```
+git clone https://github.com/apache/incubator-hawq.git /data/hawq
+```
+* build Apache HAWQ
+```
+cd /data/hawq
+./configure --prefix=/data/hawq-dev
+make
+make install
+```
+(When using CentOS 6, run `scl enable devtoolset-2 bash` before
+configuring HAWQ and run `exit` after installing HAWQ.)
+* modify Apache HAWQ configuration
+```
+sed 's|localhost|centos7-namenode|g' -i /data/hawq-dev/etc/hawq-site.xml
+echo 'centos7-datanode1' >  /data/hawq-dev/etc/slaves
+echo 'centos7-datanode2' >>  /data/hawq-dev/etc/slaves
+echo 'centos7-datanode3' >>  /data/hawq-dev/etc/slaves
+```
+* Initialize Apache HAWQ cluster
+```
+sudo -u hdfs hdfs dfs -chown gpadmin /
+source /data/hawq-dev/greenplum_path.sh
+hawq init cluster
+```
+Now you can connect to the database with the `psql` command.
+```
+[gpadmin@centos7-namenode data]$ psql -d postgres
+psql (8.2.15)
+Type "help" for help.
+
+postgres=# 
+```
+# More commands with this script
+```
+ Usage:
+    To setup a build and test environment:         make run
+    To start all containers:                       make start
+    To stop all containers:                        make stop
+    To remove hdfs containers:                     make clean
+    To remove all containers:                      make distclean
+    To build images locally:                       make build
+    To pull latest images:                         make pull
+```
+

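The Makefile variables shown earlier (OS_VERSION, NDATANODES, CLUSTER_ID, LOCAL) can also be overridden on the command line instead of editing the Makefile; this relies only on standard make variable overrides, and the path below is an example, not a default shipped with the repo.

```bash
# Example: a CentOS 6 cluster with 4 datanodes, sharing a local checkout into /data.
cd incubator-hawq/contrib/hawq-docker
make OS_VERSION=centos6 NDATANODES=4 LOCAL=/path/to/incubator-hawq run
```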
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/centos6-docker/hawq-dev/Dockerfile
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-dev/Dockerfile b/contrib/hawq-docker/centos6-docker/hawq-dev/Dockerfile
new file mode 100644
index 0000000..9fb8476
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-dev/Dockerfile
@@ -0,0 +1,123 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+FROM centos:6
+
+MAINTAINER Richard Guo <ri...@pivotal.io>
+
+# install all software we need
+RUN yum install -y epel-release && \
+ yum makecache && \
+ yum install -y man passwd sudo tar which git mlocate links make bzip2 \
+ autoconf automake libtool m4 gcc gcc-c++ gdb flex cmake gperf indent \
+ libuuid-devel krb5-devel libgsasl-devel expat-devel libxml2-devel \
+ perl-ExtUtils-Embed pam-devel python-devel snappy-devel \
+ libyaml-devel libevent-devel bzip2-devel openssl-devel \
+ openldap-devel readline-devel net-snmp-devel apr-devel \
+ libesmtp-devel xerces-c-devel python-pip json-c-devel \
+ apache-ivy java-1.7.0-openjdk-devel wget \
+ openssh-clients openssh-server perl-JSON && \
+ yum clean all
+
+# update gcc
+RUN wget -O /etc/yum.repos.d/slc6-devtoolset.repo http://linuxsoft.cern.ch/cern/devtoolset/slc6-devtoolset.repo && \
+ rpm --import http://ftp.scientificlinux.org/linux/scientific/5x/x86_64/RPM-GPG-KEYs/RPM-GPG-KEY-cern && \
+ yum install -y devtoolset-2-gcc devtoolset-2-binutils devtoolset-2-gcc-c++ && \
+ echo "source /opt/rh/devtoolset-2/enable" >> ~/.bashrc && \
+ source ~/.bashrc
+
+# install libcurl 7.45.0
+RUN mkdir -p /tmp/build/ && \
+ cd /tmp/build && curl -L "http://curl.haxx.se/download/curl-7.45.0.tar.bz2" -o curl-7.45.0.tar.bz2 && \
+ tar -xjf curl-7.45.0.tar.bz2 && cd curl-7.45.0 && \
+ ./configure --prefix=/usr && make && make install && \
+ rm -rf /tmp/build && ldconfig
+
+# install maven
+RUN curl -L "http://repos.fedorapeople.org/repos/dchen/apache-maven/epel-apache-maven.repo" -o /etc/yum.repos.d/epel-apache-maven.repo && \
+ yum install -y apache-maven && \
+ yum clean all
+
+# OS requirements
+RUN echo "kernel.sem = 250 512000 100 2048" >> /etc/sysctl.conf
+
+# setup ssh server and keys for root
+RUN ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa && \
+ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys && \
+ chmod 0600 ~/.ssh/authorized_keys
+
+# setup JAVA_HOME for all users
+RUN echo "#!/bin/sh" > /etc/profile.d/java.sh && \
+ echo "export JAVA_HOME=/etc/alternatives/java_sdk" >> /etc/profile.d/java.sh && \
+ chmod a+x /etc/profile.d/java.sh
+
+# install boost 1.59
+RUN mkdir -p /tmp/build && \
+ cd /tmp/build && curl -L "http://downloads.sourceforge.net/project/boost/boost/1.59.0/boost_1_59_0.tar.bz2" -o boost_1_59_0.tar.bz2 && \
+ tar -xjf boost_1_59_0.tar.bz2 && cd boost_1_59_0 && \
+ ./bootstrap.sh && ./b2 --prefix=/usr -q && ./b2 --prefix=/usr -q install && \
+ rm -rf /tmp/build
+
+# install bison 2.5.1
+RUN mkdir -p /tmp/build/ && \
+ cd /tmp/build && curl -L "ftp://ftp.gnu.org/gnu/bison/bison-2.5.1.tar.gz" -o bison-2.5.1.tar.gz && \
+ tar -xzf bison-2.5.1.tar.gz && cd bison-2.5.1 && \
+ ./configure --prefix=/usr && make && make install && \
+ rm -rf /tmp/build
+
+# install thrift 0.9.1
+RUN mkdir -p /tmp/build && \
+ cd /tmp/build && curl -L "https://archive.apache.org/dist/thrift/0.9.1/thrift-0.9.1.tar.gz" -o thrift-0.9.1.tar.gz && \
+ tar -xf thrift-0.9.1.tar.gz && cd thrift-0.9.1 && \
+ ./configure --prefix=/usr --without-tests && \
+ make && make install && \
+ rm -rf /tmp/build
+
+# install protobuf 2.5.0
+RUN mkdir -p /tmp/build/ && \
+ cd /tmp/build && curl -L "https://github.com/google/protobuf/releases/download/v2.5.0/protobuf-2.5.0.tar.bz2" -o protobuf-2.5.0.tar.bz2 && \
+ tar -xjf protobuf-2.5.0.tar.bz2 && cd protobuf-2.5.0 && \
+ ./configure --prefix=/usr && make && make install && ldconfig && \
+ rm -rf /tmp/build
+
+# install python module 
+RUN pip --retries=50 --timeout=300 install pycrypto
+
+# create user gpadmin since HAWQ cannot run under root
+RUN groupadd -g 1000 gpadmin && \
+ useradd -u 1000 -g 1000 gpadmin && \
+ echo "gpadmin  ALL=(ALL)       NOPASSWD: ALL" > /etc/sudoers.d/gpadmin
+
+# sudo should not require tty
+RUN sed -i -e 's|Defaults    requiretty|#Defaults    requiretty|' /etc/sudoers
+
+RUN echo "#!/bin/bash" > /etc/profile.d/user.sh && \
+ echo "export USER=\`whoami\`" >> /etc/profile.d/user.sh && \
+ chmod a+x /etc/profile.d/user.sh
+
+ENV BASEDIR /data
+RUN mkdir -p /data && chmod 777 /data
+
+USER gpadmin
+
+# setup ssh client keys for gpadmin
+RUN ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa && \
+ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys && \
+ chmod 0600 ~/.ssh/authorized_keys
+
+WORKDIR /data
+

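The image added above can also be built directly with docker, mirroring what the Makefile's build-hawq-dev-centos6 target does; the tag below matches the one the Makefile uses.

```bash
# Equivalent to the dev-image half of `make OS_VERSION=centos6 build`.
docker build -t hawq/hawq-dev:centos6 contrib/hawq-docker/centos6-docker/hawq-dev/
```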
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/centos6-docker/hawq-test/Dockerfile
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/Dockerfile b/contrib/hawq-docker/centos6-docker/hawq-test/Dockerfile
new file mode 100644
index 0000000..94a04fe
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/Dockerfile
@@ -0,0 +1,40 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+FROM hawq/hawq-dev:centos6
+
+MAINTAINER Richard Guo <ri...@pivotal.io>
+
+USER root
+
+# install HDP 2.5.0
+RUN curl -L "http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0/hdp.repo" -o /etc/yum.repos.d/hdp.repo && \
+ yum install -y hadoop hadoop-hdfs hadoop-libhdfs hadoop-yarn hadoop-mapreduce hadoop-client hdp-select && \
+ yum clean all
+
+RUN ln -s /usr/hdp/current/hadoop-hdfs-namenode/../hadoop/sbin/hadoop-daemon.sh /usr/bin/hadoop-daemon.sh
+
+COPY conf/* /etc/hadoop/conf/
+
+COPY entrypoint.sh /usr/bin/entrypoint.sh
+COPY start-hdfs.sh /usr/bin/start-hdfs.sh
+
+USER gpadmin
+
+ENTRYPOINT ["entrypoint.sh"]
+CMD ["bash"]
+

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/centos6-docker/hawq-test/conf/core-site.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/core-site.xml b/contrib/hawq-docker/centos6-docker/hawq-test/conf/core-site.xml
new file mode 100644
index 0000000..afc37fc
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/core-site.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+	<property>
+		<name>fs.defaultFS</name>
+		<value>hdfs://${hdfs.namenode}:8020</value>
+	</property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.sh b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.sh
new file mode 100644
index 0000000..95511ed
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.sh
@@ -0,0 +1,110 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.
+export JAVA_HOME=/etc/alternatives/java_sdk
+
+# The jsvc implementation to use. Jsvc is required to run secure datanodes
+# that bind to privileged ports to provide authentication of data transfer
+# protocol.  Jsvc is not required if SASL is configured for authentication of
+# data transfer protocol using non-privileged ports.
+#export JSVC_HOME=${JSVC_HOME}
+
+#export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
+
+# Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
+#for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
+#  if [ "$HADOOP_CLASSPATH" ]; then
+#    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
+#  else
+#    export HADOOP_CLASSPATH=$f
+#  fi
+#done
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+#export HADOOP_HEAPSIZE=
+#export HADOOP_NAMENODE_INIT_HEAPSIZE=""
+
+# Setup environment variable for docker image
+if [ -f /etc/profile.d/hadoop.sh ]; then
+  . /etc/profile.d/hadoop.sh
+fi
+
+if [ -z "${NAMENODE}" ]; then
+  echo "environment variable NAMENODE is not set!"
+  exit 1
+fi
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Dhdfs.namenode=${NAMENODE}"
+#export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
+
+# Command specific options appended to HADOOP_OPTS when specified
+#export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
+#export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
+
+#export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
+
+#export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
+#export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+#export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
+#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
+
+# On secure datanodes, user to run the datanode as after dropping privileges.
+# This **MUST** be uncommented to enable secure HDFS if using privileged ports
+# to provide authentication of data transfer protocol.  This **MUST NOT** be
+# defined if SASL is configured for authentication of data transfer protocol
+# using non-privileged ports.
+#export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR=/var/log/hadoop
+export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
+
+# Where log files are stored in the secure data environment.
+#export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
+
+###
+# HDFS Mover specific parameters
+###
+# Specify the JVM options to be used when starting the HDFS Mover.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HADOOP_MOVER_OPTS=""
+
+###
+# Advanced Users Only!
+###
+
+# The directory where pid files are stored. /tmp by default.
+# NOTE: this should be set to a directory that can only be written to by
+#       the user that will run the hadoop daemons.  Otherwise there is the
+#       potential for a symlink attack.
+#export HADOOP_PID_DIR=${HADOOP_PID_DIR}
+#export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
+
+# A string representing this instance of hadoop. $USER by default.
+#export HADOOP_IDENT_STRING=$USER

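The core-site.xml added earlier points fs.defaultFS at hdfs://${hdfs.namenode}:8020, and the only non-boilerplate change in this hadoop-env.sh is the HADOOP_OPTS export above. A minimal sketch of how the two fit together, assuming Hadoop's configuration layer expands ${hdfs.namenode} from the Java system property:

```bash
# NAMENODE is exported via /etc/profile.d/hadoop.sh, which the image's entrypoint script writes.
export NAMENODE=centos7-namenode
# hadoop-env.sh turns it into a JVM system property ...
export HADOOP_OPTS="-Dhdfs.namenode=${NAMENODE}"
# ... which core-site.xml picks up, so fs.defaultFS should resolve to hdfs://centos7-namenode:8020.
hdfs getconf -confKey fs.defaultFS
```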
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/centos6-docker/hawq-test/entrypoint.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/entrypoint.sh b/contrib/hawq-docker/centos6-docker/hawq-test/entrypoint.sh
new file mode 100755
index 0000000..2c03287
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/entrypoint.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+if [ -z "${NAMENODE}" ]; then
+  export NAMENODE=${HOSTNAME}
+fi
+
+if [ ! -f /etc/profile.d/hadoop.sh ]; then
+  echo '#!/bin/bash' | sudo tee /etc/profile.d/hadoop.sh
+  echo "export NAMENODE=${NAMENODE}" | sudo tee -a /etc/profile.d/hadoop.sh
+  sudo chmod a+x /etc/profile.d/hadoop.sh
+fi
+
+sudo start-hdfs.sh
+sudo sysctl -p
+sudo ln -s /usr/lib/libthrift-0.9.1.so /usr/lib64/libthrift-0.9.1.so
+
+exec "$@"

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/centos6-docker/hawq-test/start-hdfs.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/start-hdfs.sh b/contrib/hawq-docker/centos6-docker/hawq-test/start-hdfs.sh
new file mode 100755
index 0000000..076fb0a
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/start-hdfs.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+/etc/init.d/sshd start
+
+if [ -f /etc/profile.d/hadoop.sh ]; then
+  . /etc/profile.d/hadoop.sh
+fi
+
+if [ "${NAMENODE}" == "${HOSTNAME}" ]; then
+  if [ ! -d /tmp/hdfs/name/current ]; then
+    su -l hdfs -c "hdfs namenode -format"
+  fi
+  
+  if [ -z "`ps aux | grep org.apache.hadoop.hdfs.server.namenode.NameNode | grep -v grep`" ]; then
+    su -l hdfs -c "hadoop-daemon.sh start namenode"
+  fi
+else
+  if [ -z "`ps aux | grep org.apache.hadoop.hdfs.server.datanode.DataNode | grep -v grep`" ]; then
+    su -l hdfs -c "hadoop-daemon.sh start datanode"
+  fi
+fi
+

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/centos7-docker/hawq-dev/Dockerfile
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-dev/Dockerfile b/contrib/hawq-docker/centos7-docker/hawq-dev/Dockerfile
new file mode 100644
index 0000000..58d4ef0
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-dev/Dockerfile
@@ -0,0 +1,75 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+FROM centos:7
+
+MAINTAINER Richard Guo <ri...@pivotal.io>
+
+# install all software we need
+RUN yum install -y epel-release && \
+ yum makecache && \
+ yum install -y man passwd sudo tar which git mlocate links make bzip2 net-tools \
+ autoconf automake libtool m4 gcc gcc-c++ gdb bison flex cmake gperf maven indent \
+ libuuid-devel krb5-devel libgsasl-devel expat-devel libxml2-devel \
+ perl-ExtUtils-Embed pam-devel python-devel libcurl-devel snappy-devel \
+ thrift-devel libyaml-devel libevent-devel bzip2-devel openssl-devel \
+ openldap-devel protobuf-devel readline-devel net-snmp-devel apr-devel \
+ libesmtp-devel python-pip json-c-devel \
+ java-1.7.0-openjdk-devel lcov cmake \
+ openssh-clients openssh-server perl-JSON && \
+ yum clean all
+
+RUN pip --retries=50 --timeout=300 install pycrypto
+
+# OS requirement
+RUN echo "kernel.sem = 250 512000 100 2048" >> /etc/sysctl.conf
+
+# setup ssh server and keys for root
+RUN sshd-keygen && \
+ ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa && \
+ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys && \
+ chmod 0600 ~/.ssh/authorized_keys
+
+# create user gpadmin since HAWQ cannot run under root
+RUN groupadd -g 1000 gpadmin && \
+ useradd -u 1000 -g 1000 gpadmin && \
+ echo "gpadmin  ALL=(ALL)       NOPASSWD: ALL" > /etc/sudoers.d/gpadmin
+
+# sudo should not require tty
+RUN sed -i -e 's|Defaults    requiretty|#Defaults    requiretty|' /etc/sudoers
+
+# setup JAVA_HOME for all users
+RUN echo "#!/bin/sh" > /etc/profile.d/java.sh && \
+ echo "export JAVA_HOME=/etc/alternatives/java_sdk" >> /etc/profile.d/java.sh && \
+ chmod a+x /etc/profile.d/java.sh
+
+# set USER env
+RUN echo "#!/bin/bash" > /etc/profile.d/user.sh && \
+ echo "export USER=\`whoami\`" >> /etc/profile.d/user.sh && \
+ chmod a+x /etc/profile.d/user.sh
+
+ENV BASEDIR /data
+RUN mkdir -p /data && chmod 777 /data
+
+USER gpadmin
+
+# setup ssh client keys for gpadmin
+RUN ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa && \
+ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys && \
+ chmod 0600 ~/.ssh/authorized_keys
+
+WORKDIR /data

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/centos7-docker/hawq-test/Dockerfile
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/Dockerfile b/contrib/hawq-docker/centos7-docker/hawq-test/Dockerfile
new file mode 100644
index 0000000..ea5e22c
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/Dockerfile
@@ -0,0 +1,40 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+FROM hawq/hawq-dev:centos7
+
+MAINTAINER Richard Guo <ri...@pivotal.io>
+
+USER root
+
+## install HDP 2.5.0
+RUN curl -L "http://public-repo-1.hortonworks.com/HDP/centos7/2.x/updates/2.5.0.0/hdp.repo" -o /etc/yum.repos.d/hdp.repo && \
+ yum install -y hadoop hadoop-hdfs hadoop-libhdfs hadoop-yarn hadoop-mapreduce hadoop-client hdp-select && \
+ yum clean all
+
+RUN ln -s /usr/hdp/current/hadoop-hdfs-namenode/../hadoop/sbin/hadoop-daemon.sh /usr/bin/hadoop-daemon.sh
+
+COPY conf/* /etc/hadoop/conf/
+
+COPY entrypoint.sh /usr/bin/entrypoint.sh
+COPY start-hdfs.sh /usr/bin/start-hdfs.sh
+
+USER gpadmin
+
+ENTRYPOINT ["entrypoint.sh"]
+CMD ["bash"]
+

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/centos7-docker/hawq-test/conf/core-site.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/core-site.xml b/contrib/hawq-docker/centos7-docker/hawq-test/conf/core-site.xml
new file mode 100644
index 0000000..afc37fc
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/core-site.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+	<property>
+		<name>fs.defaultFS</name>
+		<value>hdfs://${hdfs.namenode}:8020</value>
+	</property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-env.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-env.sh b/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-env.sh
new file mode 100644
index 0000000..95511ed
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/hadoop-env.sh
@@ -0,0 +1,110 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.
+export JAVA_HOME=/etc/alternatives/java_sdk
+
+# The jsvc implementation to use. Jsvc is required to run secure datanodes
+# that bind to privileged ports to provide authentication of data transfer
+# protocol.  Jsvc is not required if SASL is configured for authentication of
+# data transfer protocol using non-privileged ports.
+#export JSVC_HOME=${JSVC_HOME}
+
+#export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
+
+# Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
+#for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
+#  if [ "$HADOOP_CLASSPATH" ]; then
+#    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
+#  else
+#    export HADOOP_CLASSPATH=$f
+#  fi
+#done
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+#export HADOOP_HEAPSIZE=
+#export HADOOP_NAMENODE_INIT_HEAPSIZE=""
+
+# Setup environment variable for docker image
+if [ -f /etc/profile.d/hadoop.sh ]; then
+  . /etc/profile.d/hadoop.sh
+fi
+
+if [ -z "${NAMENODE}" ]; then
+  echo "environment variable NAMENODE is not set!"
+  exit 1
+fi
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Dhdfs.namenode=${NAMENODE}"
+#export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
+
+# Command specific options appended to HADOOP_OPTS when specified
+#export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
+#export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
+
+#export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
+
+#export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
+#export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+#export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
+#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
+
+# On secure datanodes, user to run the datanode as after dropping privileges.
+# This **MUST** be uncommented to enable secure HDFS if using privileged ports
+# to provide authentication of data transfer protocol.  This **MUST NOT** be
+# defined if SASL is configured for authentication of data transfer protocol
+# using non-privileged ports.
+#export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR=/var/log/hadoop
+export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
+
+# Where log files are stored in the secure data environment.
+#export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
+
+###
+# HDFS Mover specific parameters
+###
+# Specify the JVM options to be used when starting the HDFS Mover.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HADOOP_MOVER_OPTS=""
+
+###
+# Advanced Users Only!
+###
+
+# The directory where pid files are stored. /tmp by default.
+# NOTE: this should be set to a directory that can only be written to by
+#       the user that will run the hadoop daemons.  Otherwise there is the
+#       potential for a symlink attack.
+#export HADOOP_PID_DIR=${HADOOP_PID_DIR}
+#export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
+
+# A string representing this instance of hadoop. $USER by default.
+#export HADOOP_IDENT_STRING=$USER

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/centos7-docker/hawq-test/entrypoint.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/entrypoint.sh b/contrib/hawq-docker/centos7-docker/hawq-test/entrypoint.sh
new file mode 100755
index 0000000..abdc508
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/entrypoint.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+if [ -z "${NAMENODE}" ]; then
+  export NAMENODE=${HOSTNAME}
+fi
+
+if [ ! -f /etc/profile.d/hadoop.sh ]; then
+  echo '#!/bin/bash' | sudo tee /etc/profile.d/hadoop.sh
+  echo "export NAMENODE=${NAMENODE}" | sudo tee -a /etc/profile.d/hadoop.sh
+  sudo chmod a+x /etc/profile.d/hadoop.sh
+fi
+
+sudo start-hdfs.sh
+sudo sysctl -p
+
+exec "$@"

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/368dbc9e/contrib/hawq-docker/centos7-docker/hawq-test/start-hdfs.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/start-hdfs.sh b/contrib/hawq-docker/centos7-docker/hawq-test/start-hdfs.sh
new file mode 100755
index 0000000..f39200d
--- /dev/null
+++ b/contrib/hawq-docker/centos7-docker/hawq-test/start-hdfs.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+/usr/sbin/sshd
+
+if [ -f /etc/profile.d/hadoop.sh ]; then
+  . /etc/profile.d/hadoop.sh
+fi
+
+if [ "${NAMENODE}" == "${HOSTNAME}" ]; then
+  if [ ! -d /tmp/hdfs/name/current ]; then
+    su -l hdfs -c "hdfs namenode -format"
+  fi
+  
+  if [ -z "`ps aux | grep org.apache.hadoop.hdfs.server.namenode.NameNode | grep -v grep`" ]; then
+    su -l hdfs -c "hadoop-daemon.sh start namenode"
+  fi
+else
+  if [ -z "`ps aux | grep org.apache.hadoop.hdfs.server.datanode.DataNode | grep -v grep`" ]; then
+    su -l hdfs -c "hadoop-daemon.sh start datanode"
+  fi
+fi
+



[19/50] [abbrv] incubator-hawq git commit: HAWQ-1248. Merge Dockerfiles for HAWQ Dev into HAWQ code base.

Posted by es...@apache.org.
HAWQ-1248. Merge Dockerfiles for HAWQ Dev into HAWQ code base.


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/440ce595
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/440ce595
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/440ce595

Branch: refs/heads/2.1.0.0-incubating
Commit: 440ce595a2298ac9be16973f0c7c4c358ddb2cd0
Parents: c8be9f2
Author: Richard Guo <gu...@gmail.com>
Authored: Tue Jan 3 17:12:59 2017 +0800
Committer: Ruilong Huo <rh...@pivotal.io>
Committed: Fri Jan 13 19:16:28 2017 +0800

----------------------------------------------------------------------
 contrib/hawq-docker/Makefile                    | 222 ++++++++++++++
 contrib/hawq-docker/README.md                   |  97 +++++++
 .../centos6-docker/hawq-dev/Dockerfile          | 123 ++++++++
 .../centos6-docker/hawq-test/Dockerfile         |  40 +++
 .../hawq-test/conf/capacity-scheduler.xml       | 134 +++++++++
 .../hawq-test/conf/configuration.xsl            |  40 +++
 .../hawq-test/conf/container-executor.cfg       |   4 +
 .../centos6-docker/hawq-test/conf/core-site.xml |  24 ++
 .../hawq-test/conf/hadoop-env.cmd               |  92 ++++++
 .../centos6-docker/hawq-test/conf/hadoop-env.sh | 110 +++++++
 .../hawq-test/conf/hadoop-metrics.properties    |  75 +++++
 .../hawq-test/conf/hadoop-metrics2.properties   |  68 +++++
 .../hawq-test/conf/hadoop-policy.xml            | 226 ++++++++++++++
 .../centos6-docker/hawq-test/conf/hdfs-site.xml | 100 +++++++
 .../centos6-docker/hawq-test/conf/kms-acls.xml  | 135 +++++++++
 .../centos6-docker/hawq-test/conf/kms-env.sh    |  55 ++++
 .../hawq-test/conf/kms-log4j.properties         |  38 +++
 .../centos6-docker/hawq-test/conf/kms-site.xml  | 173 +++++++++++
 .../hawq-test/conf/log4j.properties             | 291 +++++++++++++++++++
 .../hawq-test/conf/mapred-env.cmd               |  20 ++
 .../centos6-docker/hawq-test/conf/mapred-env.sh |  27 ++
 .../hawq-test/conf/mapred-queues.xml.template   |  92 ++++++
 .../hawq-test/conf/mapred-site.xml.template     |  21 ++
 .../centos6-docker/hawq-test/conf/slaves        |   1 +
 .../hawq-test/conf/ssl-client.xml.example       |  80 +++++
 .../hawq-test/conf/ssl-server.xml.example       |  78 +++++
 .../centos6-docker/hawq-test/conf/yarn-env.cmd  |  60 ++++
 .../centos6-docker/hawq-test/entrypoint.sh      |  34 +++
 .../centos6-docker/hawq-test/start-hdfs.sh      |  39 +++
 .../centos7-docker/hawq-dev/Dockerfile          |  75 +++++
 .../centos7-docker/hawq-test/Dockerfile         |  40 +++
 .../hawq-test/conf/capacity-scheduler.xml       | 134 +++++++++
 .../hawq-test/conf/configuration.xsl            |  40 +++
 .../hawq-test/conf/container-executor.cfg       |   4 +
 .../centos7-docker/hawq-test/conf/core-site.xml |  24 ++
 .../hawq-test/conf/hadoop-env.cmd               |  92 ++++++
 .../centos7-docker/hawq-test/conf/hadoop-env.sh | 110 +++++++
 .../hawq-test/conf/hadoop-metrics.properties    |  75 +++++
 .../hawq-test/conf/hadoop-metrics2.properties   |  68 +++++
 .../hawq-test/conf/hadoop-policy.xml            | 226 ++++++++++++++
 .../centos7-docker/hawq-test/conf/hdfs-site.xml | 100 +++++++
 .../centos7-docker/hawq-test/conf/kms-acls.xml  | 135 +++++++++
 .../centos7-docker/hawq-test/conf/kms-env.sh    |  55 ++++
 .../hawq-test/conf/kms-log4j.properties         |  38 +++
 .../centos7-docker/hawq-test/conf/kms-site.xml  | 173 +++++++++++
 .../hawq-test/conf/log4j.properties             | 291 +++++++++++++++++++
 .../hawq-test/conf/mapred-env.cmd               |  20 ++
 .../centos7-docker/hawq-test/conf/mapred-env.sh |  27 ++
 .../hawq-test/conf/mapred-queues.xml.template   |  92 ++++++
 .../hawq-test/conf/mapred-site.xml.template     |  21 ++
 .../centos7-docker/hawq-test/conf/slaves        |   1 +
 .../hawq-test/conf/ssl-client.xml.example       |  80 +++++
 .../hawq-test/conf/ssl-server.xml.example       |  78 +++++
 .../centos7-docker/hawq-test/conf/yarn-env.cmd  |  60 ++++
 .../centos7-docker/hawq-test/entrypoint.sh      |  33 +++
 .../centos7-docker/hawq-test/start-hdfs.sh      |  39 +++
 56 files changed, 4630 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/Makefile
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/Makefile b/contrib/hawq-docker/Makefile
new file mode 100644
index 0000000..120ebe2
--- /dev/null
+++ b/contrib/hawq-docker/Makefile
@@ -0,0 +1,222 @@
+#!/usr/bin/make all
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+THIS_MAKEFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
+TOP_DIR := $(abspath $(dir ${THIS_MAKEFILE_PATH}))
+NDATANODES := 3
+CUR_DATANODE := 1
+OS_VERSION := centos7
+# Do not use underscore "_" in CLUSTER_ID
+CLUSTER_ID := $(OS_VERSION)
+# Mount this local directory to /data in the data container and share it with other containers
+LOCAL := 
+# networks used in docker
+NETWORK := $(CLUSTER_ID)_hawq_network
+
+all: 
+	@echo " Usage:"
+	@echo "    To setup a build and test environment:         make run"
+	@echo "    To start all containers:                       make start"
+	@echo "    To stop all containers:                        make stop"
+	@echo "    To remove hdfs containers:                     make clean"
+	@echo "    To remove all containers:                      make distclean"
+	@echo ""
+	@echo "    To build images locally:                       make build"
+	@echo "    To pull latest images:                         make pull"
+
+build:
+	@make -f $(THIS_MAKEFILE_PATH) build-hawq-dev-$(OS_VERSION)
+	@make -f $(THIS_MAKEFILE_PATH) build-hawq-test-$(OS_VERSION)
+	@echo "Build Images Done!"
+
+build-hawq-dev-$(OS_VERSION): $(TOP_DIR)/$(OS_VERSION)-docker/hawq-dev/Dockerfile
+	@echo build hawq-dev:$(OS_VERSION) image
+	docker build -t hawq/hawq-dev:$(OS_VERSION) $(TOP_DIR)/$(OS_VERSION)-docker/hawq-dev/
+
+build-hawq-test-$(OS_VERSION): $(TOP_DIR)/$(OS_VERSION)-docker/hawq-test/Dockerfile
+	@echo build hawq-test:$(OS_VERSION) image
+	docker build -t hawq/hawq-test:$(OS_VERSION) $(TOP_DIR)/$(OS_VERSION)-docker/hawq-test/
+
+create-data-container:
+	@echo create ${CLUSTER_ID}-data container
+	@if [ ! -z "$(LOCAL)" -a ! -d "$(LOCAL)" ]; then \
+		echo "LOCAL must be set to a directory!"; \
+		exit 1; \
+	fi
+	@if [ -z "`docker ps -a --filter="name=${CLUSTER_ID}-data$$" | grep -v CONTAINER`" ]; then \
+		if [ -z "$(LOCAL)" ]; then \
+			docker create -v /data --name=${CLUSTER_ID}-data hawq/hawq-dev:$(OS_VERSION) /bin/true; \
+		else \
+			docker create -v $(LOCAL):/data --name=${CLUSTER_ID}-data hawq/hawq-dev:$(OS_VERSION) /bin/true; \
+		fi \
+	else \
+		echo "${CLUSTER_ID}-data container already exist!"; \
+	fi
+
+run:
+	@if [ -z "`docker network ls 2>/dev/null`" ]; then \
+ 		make -f $(THIS_MAKEFILE_PATH) NETWORK=default create-data-container && \
+		make -f $(THIS_MAKEFILE_PATH) NETWORK=default run-hdfs; \
+	else \
+		if [ -z "`docker network ls 2>/dev/null | grep $(NETWORK)`" ]; then \
+			echo create network $(NETWORK) && \
+			docker network create --driver bridge $(NETWORK); \
+		fi && \
+		make -f $(THIS_MAKEFILE_PATH) create-data-container && \
+		make -f $(THIS_MAKEFILE_PATH) run-hdfs; \
+	fi
+
+run-hdfs:
+	@make -f $(THIS_MAKEFILE_PATH) run-namenode-container
+	@i=1; \
+	while [ $$i -le $(NDATANODES) ] ; do \
+		make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i run-datanode-container; \
+		i=$$((i+1)); \
+	done
+	@echo "HAWQ Environment Setup Done!"
+	@echo 'run "docker exec -it ${CLUSTER_ID}-namenode bash" to attach to ${CLUSTER_ID}-namenode node'
+
+run-namenode-container:
+	@echo "run ${CLUSTER_ID}-namenode container"
+	@if [ -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
+		docker run --privileged -itd --net=$(NETWORK) --hostname=${CLUSTER_ID}-namenode --name=${CLUSTER_ID}-namenode \
+			--volumes-from ${CLUSTER_ID}-data hawq/hawq-test:$(OS_VERSION); \
+	else \
+		echo "${CLUSTER_ID}-namenode container already exist!"; \
+	fi
+
+run-datanode-container:
+	@echo "run ${CLUSTER_ID}-datanode$(CUR_DATANODE) container"
+	@if [ -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \
+		docker run --privileged -itd --net=$(NETWORK) --hostname=${CLUSTER_ID}-datanode$(CUR_DATANODE) \
+			--name=${CLUSTER_ID}-datanode$(CUR_DATANODE) -e NAMENODE=${CLUSTER_ID}-namenode \
+			--volumes-from ${CLUSTER_ID}-data hawq/hawq-test:$(OS_VERSION); \
+	else \
+		echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container already exist!"; \
+	fi
+
+start:
+	@make -f $(THIS_MAKEFILE_PATH) start-hdfs
+	@echo 'run "docker exec -it ${CLUSTER_ID}-namenode bash" to attach to ${CLUSTER_ID}-namenode node'
+
+start-hdfs:
+	@make -f $(THIS_MAKEFILE_PATH) start-namenode-container
+	@i=1;\
+	while [ $$i -le $(NDATANODES) ] ; do \
+		make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i start-datanode-container; \
+		i=$$((i+1)); \
+	done
+	@echo "Start All Containers Done!"
+
+start-namenode-container:
+	@echo "start ${CLUSTER_ID}-namenode container"
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
+		docker start ${CLUSTER_ID}-namenode; \
+	else \
+		echo "${CLUSTER_ID}-namenode container does not exist!" && exit 1; \
+	fi
+
+start-datanode-container:
+	@echo "start ${CLUSTER_ID}-datanode$(CUR_DATANODE) container"
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \
+		docker start ${CLUSTER_ID}-datanode$(CUR_DATANODE); \
+	else \
+		echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container does not exist!" && exit 1; \
+	fi
+
+stop:
+	@make -f $(THIS_MAKEFILE_PATH) stop-hdfs
+
+stop-hdfs:
+	@make -f $(THIS_MAKEFILE_PATH) stop-namenode-container
+	@i=1; \
+	while [ $$i -le $(NDATANODES) ] ; do \
+		make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i stop-datanode-container; \
+		i=$$((i+1)); \
+	done
+	@echo "Stop All Containers Done!"
+
+stop-namenode-container:
+	@echo "stop ${CLUSTER_ID}-namenode container"
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
+		docker stop -t 0 ${CLUSTER_ID}-namenode; \
+	else \
+		echo "${CLUSTER_ID}-namenode container does not exist!"; \
+	fi
+
+stop-datanode-container:
+	@echo "stop ${CLUSTER_ID}-datanode$(CUR_DATANODE) container"
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \
+		docker stop -t 0 ${CLUSTER_ID}-datanode$(CUR_DATANODE); \
+	else \
+		echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container does not exist!"; \
+	fi
+
+remove-hdfs:
+	@make -f $(THIS_MAKEFILE_PATH) remove-namenode-container
+	@i=1; \
+	while [ $$i -le $(NDATANODES) ] ; do \
+		make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i remove-datanode-container; \
+		i=$$((i+1)); \
+	done
+	@echo "Remove HDFS Done!"
+
+remove-namenode-container:
+	@echo "make ${CLUSTER_ID}-namenode container"
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
+		docker rm -v ${CLUSTER_ID}-namenode; \
+	else \
+		echo "${CLUSTER_ID}-namenode container does not exist!"; \
+	fi
+
+remove-datanode-container:
+	@echo "make ${CLUSTER_ID}-datanode$(CUR_DATANODE) container"
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \
+		docker rm -v ${CLUSTER_ID}-datanode$(CUR_DATANODE); \
+	else \
+		echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container does not exist!"; \
+	fi
+
+remove-data:
+	@echo remove ${CLUSTER_ID}-data container
+	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-data" | grep -v CONTAINER`" ]; then \
+		docker rm -v ${CLUSTER_ID}-data; \
+	else \
+		echo "${CLUSTER_ID}-data container does not exist!"; \
+	fi
+
+pull:
+	@echo pull latest images
+	docker pull hawq/hawq-dev:$(OS_VERSION)
+	docker pull hawq/hawq-test:$(OS_VERSION)
+
+clean:
+	@make -f $(THIS_MAKEFILE_PATH) stop 2>&1 >/dev/null || true
+	@make -f $(THIS_MAKEFILE_PATH) remove-hdfs 2>&1 >/dev/null || true
+	@echo "Clean Done!"
+
+distclean:
+	@make -f $(THIS_MAKEFILE_PATH) stop 2>&1 >/dev/null || true
+	@make -f $(THIS_MAKEFILE_PATH) remove-hdfs 2>&1 >/dev/null || true
+	@make -f $(THIS_MAKEFILE_PATH) remove-data 2>&1 >/dev/null || true
+	@if [ ! -z "`docker network ls 2>/dev/null | grep $(NETWORK)`" ]; then \
+		echo remove network $(NETWORK); \
+		docker network rm $(NETWORK) 2>&1 >/dev/null || true; \
+	fi
+	@echo "Distclean Done!"

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/README.md
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/README.md b/contrib/hawq-docker/README.md
new file mode 100644
index 0000000..4adeaaf
--- /dev/null
+++ b/contrib/hawq-docker/README.md
@@ -0,0 +1,97 @@
+# hawq-docker
+
+hawq-docker is based on *wangzw's* repo *hawq-devel-env*. It provides the docker images and scripts that help Apache HAWQ developers set up a building and testing environment with docker.
+
+Both CentOS 7 and CentOS 6 are supported.
+Change the variable **OS_VERSION** (:= centos7 OR centos6) in the Makefile to switch between CentOS 7 and CentOS 6.
+
+Take CentOS 7 as an example below.
+
+# Install docker
+* Follow the instructions to install docker:
+https://docs.docker.com/
+
+# Setup build and test environment
+* Clone the HAWQ repository
+```
+git clone https://github.com/apache/incubator-hawq.git .
+cd incubator-hawq/contrib/hawq-docker
+```
+* Get the docker images
+```
+  make pull (recommended)
+OR
+  make build
+``` 
+(`make pull` pulls the docker images from Docker Hub, while `make build` builds them locally. In general, `make pull` is faster than `make build`.)
+* Set up a 5-node virtual cluster for Apache HAWQ build and test.
+```
+make run
+```
+Now let's have a look at what we created.
+```
+[root@localhost hawq-docker]# docker ps -a
+CONTAINER ID        IMAGE                          COMMAND                CREATED             STATUS              PORTS               NAMES
+382b2b3360d1        hawq/hawq-test:centos7   "entrypoint.sh bash"   2 minutes ago       Up 2 minutes                            centos7-datanode3
+86513c331d45        hawq/hawq-test:centos7   "entrypoint.sh bash"   2 minutes ago       Up 2 minutes                            centos7-datanode2
+c0ab10e46e4a        hawq/hawq-test:centos7   "entrypoint.sh bash"   2 minutes ago       Up 2 minutes                            centos7-datanode1
+e27beea63953        hawq/hawq-test:centos7   "entrypoint.sh bash"   2 minutes ago       Up 2 minutes                            centos7-namenode
+1f986959bd04        hawq/hawq-dev:centos7    "/bin/true"            2 minutes ago       Created                                 centos7-data
+```
+**centos7-data** is a data container mounted at /data on all other containers to provide shared storage for the cluster.
+
+# Build and Test Apache HAWQ
+* Attach to the namenode
+```
+docker exec -it centos7-namenode bash
+```
+* Check that HDFS is working
+```
+sudo -u hdfs hdfs dfsadmin -report
+```
+* Clone the Apache HAWQ code to the /data directory
+```
+git clone https://github.com/apache/incubator-hawq.git /data/hawq
+```
+* Build Apache HAWQ
+```
+cd /data/hawq
+./configure --prefix=/data/hawq-dev
+make
+make install
+```
+(When using CentOS 6, run `scl enable devtoolset-2 bash` before configuring
+HAWQ and run `exit` after installing it.)
+* Modify the Apache HAWQ configuration
+```
+sed 's|localhost|centos7-namenode|g' -i /data/hawq-dev/etc/hawq-site.xml
+echo 'centos7-datanode1' >  /data/hawq-dev/etc/slaves
+echo 'centos7-datanode2' >>  /data/hawq-dev/etc/slaves
+echo 'centos7-datanode3' >>  /data/hawq-dev/etc/slaves
+```
+* Initialize Apache HAWQ cluster
+```
+sudo -u hdfs hdfs dfs -chown gpadmin /
+source /data/hawq-dev/greenplum_path.sh
+hawq init cluster
+```
+Now you can connect to the database with the `psql` command.
+```
+[gpadmin@centos7-namenode data]$ psql -d postgres
+psql (8.2.15)
+Type "help" for help.
+
+postgres=# 
+```
+# More commands with this script
+```
+ Usage:
+    To setup a build and test environment:         make run
+    To start all containers:                       make start
+    To stop all containers:                        make stop
+    To remove hdfs containers:                     make clean
+    To remove all containers:                      make distclean
+    To build images locally:                       make build
+    To pull latest images:                         make pull
+```
+
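
As a follow-up sanity check after the `hawq init cluster` step above, something along these lines can confirm the cluster is up (a sketch; `hawq state` and the psql one-liner are standard HAWQ/PostgreSQL commands rather than part of this commit):

```
source /data/hawq-dev/greenplum_path.sh
hawq state                                # summarize master and segment status
psql -d postgres -c "SELECT version();"   # confirm the server accepts connections
```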

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-dev/Dockerfile
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-dev/Dockerfile b/contrib/hawq-docker/centos6-docker/hawq-dev/Dockerfile
new file mode 100644
index 0000000..9fb8476
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-dev/Dockerfile
@@ -0,0 +1,123 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+FROM centos:6
+
+MAINTAINER Richard Guo <ri...@pivotal.io>
+
+# install all software we need
+RUN yum install -y epel-release && \
+ yum makecache && \
+ yum install -y man passwd sudo tar which git mlocate links make bzip2 \
+ autoconf automake libtool m4 gcc gcc-c++ gdb flex cmake gperf indent \
+ libuuid-devel krb5-devel libgsasl-devel expat-devel libxml2-devel \
+ perl-ExtUtils-Embed pam-devel python-devel snappy-devel \
+ libyaml-devel libevent-devel bzip2-devel openssl-devel \
+ openldap-devel readline-devel net-snmp-devel apr-devel \
+ libesmtp-devel xerces-c-devel python-pip json-c-devel \
+ apache-ivy java-1.7.0-openjdk-devel wget \
+ openssh-clients openssh-server perl-JSON && \
+ yum clean all
+
+# update gcc
+RUN wget -O /etc/yum.repos.d/slc6-devtoolset.repo http://linuxsoft.cern.ch/cern/devtoolset/slc6-devtoolset.repo && \
+ rpm --import http://ftp.scientificlinux.org/linux/scientific/5x/x86_64/RPM-GPG-KEYs/RPM-GPG-KEY-cern && \
+ yum install -y devtoolset-2-gcc devtoolset-2-binutils devtoolset-2-gcc-c++ && \
+ echo "source /opt/rh/devtoolset-2/enable" >> ~/.bashrc && \
+ source ~/.bashrc
+
+# install libcurl 7.45.0
+RUN mkdir -p /tmp/build/ && \
+ cd /tmp/build && curl -L "http://curl.haxx.se/download/curl-7.45.0.tar.bz2" -o curl-7.45.0.tar.bz2 && \
+ tar -xjf curl-7.45.0.tar.bz2 && cd curl-7.45.0 && \
+ ./configure --prefix=/usr && make && make install && \
+ rm -rf /tmp/build && ldconfig
+
+# install maven
+RUN curl -L "http://repos.fedorapeople.org/repos/dchen/apache-maven/epel-apache-maven.repo" -o /etc/yum.repos.d/epel-apache-maven.repo && \
+ yum install -y apache-maven && \
+ yum clean all
+
+# OS requirements
+RUN echo "kernel.sem = 250 512000 100 2048" >> /etc/sysctl.conf
+
+# setup ssh server and keys for root
+RUN ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa && \
+ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys && \
+ chmod 0600 ~/.ssh/authorized_keys
+
+# setup JAVA_HOME for all users
+RUN echo "#!/bin/sh" > /etc/profile.d/java.sh && \
+ echo "export JAVA_HOME=/etc/alternatives/java_sdk" >> /etc/profile.d/java.sh && \
+ chmod a+x /etc/profile.d/java.sh
+
+# install boost 1.59
+RUN mkdir -p /tmp/build && \
+ cd /tmp/build && curl -L "http://downloads.sourceforge.net/project/boost/boost/1.59.0/boost_1_59_0.tar.bz2" -o boost_1_59_0.tar.bz2 && \
+ tar -xjf boost_1_59_0.tar.bz2 && cd boost_1_59_0 && \
+ ./bootstrap.sh && ./b2 --prefix=/usr -q && ./b2 --prefix=/usr -q install && \
+ rm -rf /tmp/build
+
+# install bison 2.5.1
+RUN mkdir -p /tmp/build/ && \
+ cd /tmp/build && curl -L "ftp://ftp.gnu.org/gnu/bison/bison-2.5.1.tar.gz" -o bison-2.5.1.tar.gz && \
+ tar -xzf bison-2.5.1.tar.gz && cd bison-2.5.1 && \
+ ./configure --prefix=/usr && make && make install && \
+ rm -rf /tmp/build
+
+# install thrift 0.9.1
+RUN mkdir -p /tmp/build && \
+ cd /tmp/build && curl -L "https://archive.apache.org/dist/thrift/0.9.1/thrift-0.9.1.tar.gz" -o thrift-0.9.1.tar.gz && \
+ tar -xf thrift-0.9.1.tar.gz && cd thrift-0.9.1 && \
+ ./configure --prefix=/usr --without-tests && \
+ make && make install && \
+ rm -rf /tmp/build
+
+# install protobuf 2.5.0
+RUN mkdir -p /tmp/build/ && \
+ cd /tmp/build && curl -L "https://github.com/google/protobuf/releases/download/v2.5.0/protobuf-2.5.0.tar.bz2" -o protobuf-2.5.0.tar.bz2 && \
+ tar -xjf protobuf-2.5.0.tar.bz2 && cd protobuf-2.5.0 && \
+ ./configure --prefix=/usr && make && make install && ldconfig && \
+ rm -rf /tmp/build
+
+# install python module 
+RUN pip --retries=50 --timeout=300 install pycrypto
+
+# create user gpadmin since HAWQ cannot run under root
+RUN groupadd -g 1000 gpadmin && \
+ useradd -u 1000 -g 1000 gpadmin && \
+ echo "gpadmin  ALL=(ALL)       NOPASSWD: ALL" > /etc/sudoers.d/gpadmin
+
+# sudo should not require tty
+RUN sed -i -e 's|Defaults    requiretty|#Defaults    requiretty|' /etc/sudoers
+
+RUN echo "#!/bin/bash" > /etc/profile.d/user.sh && \
+ echo "export USER=\`whoami\`" >> /etc/profile.d/user.sh && \
+ chmod a+x /etc/profile.d/user.sh
+
+ENV BASEDIR /data
+RUN mkdir -p /data && chmod 777 /data
+
+USER gpadmin
+
+# setup ssh client keys for gpadmin
+RUN ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa && \
+ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys && \
+ chmod 0600 ~/.ssh/authorized_keys
+
+WORKDIR /data
+
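
The image can also be built and used directly with docker, outside the Makefile targets (a sketch, not part of the commit; the source path is a placeholder and the tag mirrors what the Makefile's build-hawq-dev target produces for centos6):

```
cd contrib/hawq-docker/centos6-docker/hawq-dev
docker build -t hawq/hawq-dev:centos6 .

# Drop into the toolchain as gpadmin with a HAWQ checkout mounted at /data.
docker run --rm -it -v /path/to/incubator-hawq:/data hawq/hawq-dev:centos6 bash
```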

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/Dockerfile
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/Dockerfile b/contrib/hawq-docker/centos6-docker/hawq-test/Dockerfile
new file mode 100644
index 0000000..94a04fe
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/Dockerfile
@@ -0,0 +1,40 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+FROM hawq/hawq-dev:centos6
+
+MAINTAINER Richard Guo <ri...@pivotal.io>
+
+USER root
+
+# install HDP 2.5.0
+RUN curl -L "http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0/hdp.repo" -o /etc/yum.repos.d/hdp.repo && \
+ yum install -y hadoop hadoop-hdfs hadoop-libhdfs hadoop-yarn hadoop-mapreduce hadoop-client hdp-select && \
+ yum clean all
+
+RUN ln -s /usr/hdp/current/hadoop-hdfs-namenode/../hadoop/sbin/hadoop-daemon.sh /usr/bin/hadoop-daemon.sh
+
+COPY conf/* /etc/hadoop/conf/
+
+COPY entrypoint.sh /usr/bin/entrypoint.sh
+COPY start-hdfs.sh /usr/bin/start-hdfs.sh
+
+USER gpadmin
+
+ENTRYPOINT ["entrypoint.sh"]
+CMD ["bash"]
+
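
To inspect the HDP layer this image adds on top of hawq-dev without going through the Makefile, the configured entrypoint can be bypassed for a one-off command (a sketch, not part of the commit; `--entrypoint` overrides entrypoint.sh):

```
# Report the Hadoop build installed from the HDP 2.5.0 repository.
docker run --rm --entrypoint hadoop hawq/hawq-test:centos6 version

# List the HDP stack versions known to hdp-select.
docker run --rm --entrypoint hdp-select hawq/hawq-test:centos6 versions
```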

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/capacity-scheduler.xml b/contrib/hawq-docker/centos6-docker/hawq-test/conf/capacity-scheduler.xml
new file mode 100644
index 0000000..30f4eb9
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/capacity-scheduler.xml
@@ -0,0 +1,134 @@
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<configuration>
+
+  <property>
+    <name>yarn.scheduler.capacity.maximum-applications</name>
+    <value>10000</value>
+    <description>
+      Maximum number of applications that can be pending and running.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
+    <value>0.1</value>
+    <description>
+      Maximum percent of resources in the cluster which can be used to run 
+      application masters i.e. controls number of concurrent running
+      applications.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.resource-calculator</name>
+    <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
+    <description>
+      The ResourceCalculator implementation to be used to compare 
+      Resources in the scheduler.
+      The default i.e. DefaultResourceCalculator only uses Memory while
+      DominantResourceCalculator uses dominant-resource to compare 
+      multi-dimensional resources such as Memory, CPU etc.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.queues</name>
+    <value>default</value>
+    <description>
+      The queues at the this level (root is the root queue).
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.capacity</name>
+    <value>100</value>
+    <description>Default queue target capacity.</description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
+    <value>1</value>
+    <description>
+      Default queue user limit a percentage from 0.0 to 1.0.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
+    <value>100</value>
+    <description>
+      The maximum capacity of the default queue. 
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.state</name>
+    <value>RUNNING</value>
+    <description>
+      The state of the default queue. State can be one of RUNNING or STOPPED.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
+    <value>*</value>
+    <description>
+      The ACL of who can submit jobs to the default queue.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
+    <value>*</value>
+    <description>
+      The ACL of who can administer jobs on the default queue.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.node-locality-delay</name>
+    <value>40</value>
+    <description>
+      Number of missed scheduling opportunities after which the CapacityScheduler 
+      attempts to schedule rack-local containers. 
+      Typically this should be set to number of nodes in the cluster, By default is setting 
+      approximately number of nodes in one rack which is 40.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.queue-mappings</name>
+    <value></value>
+    <description>
+      A list of mappings that will be used to assign jobs to queues
+      The syntax for this list is [u|g]:[name]:[queue_name][,next mapping]*
+      Typically this list will be used to map users to queues,
+      for example, u:%user:%user maps all users to queues with the same name
+      as the user.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.queue-mappings-override.enable</name>
+    <value>false</value>
+    <description>
+      If a queue mapping is present, will it override the value specified
+      by the user? This can be used by administrators to place jobs in queues
+      that are different than the one specified by the user.
+      The default is false.
+    </description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/configuration.xsl
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/configuration.xsl b/contrib/hawq-docker/centos6-docker/hawq-test/conf/configuration.xsl
new file mode 100644
index 0000000..d50d80b
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/configuration.xsl
@@ -0,0 +1,40 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:output method="html"/>
+<xsl:template match="configuration">
+<html>
+<body>
+<table border="1">
+<tr>
+ <td>name</td>
+ <td>value</td>
+ <td>description</td>
+</tr>
+<xsl:for-each select="property">
+<tr>
+  <td><a name="{name}"><xsl:value-of select="name"/></a></td>
+  <td><xsl:value-of select="value"/></td>
+  <td><xsl:value-of select="description"/></td>
+</tr>
+</xsl:for-each>
+</table>
+</body>
+</html>
+</xsl:template>
+</xsl:stylesheet>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/container-executor.cfg
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/container-executor.cfg b/contrib/hawq-docker/centos6-docker/hawq-test/conf/container-executor.cfg
new file mode 100644
index 0000000..d68cee8
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/container-executor.cfg
@@ -0,0 +1,4 @@
+yarn.nodemanager.linux-container-executor.group=#configured value of yarn.nodemanager.linux-container-executor.group
+banned.users=#comma separated list of users who can not run applications
+min.user.id=1000#Prevent other super-users
+allowed.system.users=##comma separated list of system users who CAN run applications

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/core-site.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/core-site.xml b/contrib/hawq-docker/centos6-docker/hawq-test/conf/core-site.xml
new file mode 100644
index 0000000..afc37fc
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/core-site.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+	<property>
+		<name>fs.defaultFS</name>
+		<value>hdfs://${hdfs.namenode}:8020</value>
+	</property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.cmd
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.cmd b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.cmd
new file mode 100644
index 0000000..bb40ec9
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.cmd
@@ -0,0 +1,92 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements.  See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License.  You may obtain a copy of the License at
+@rem
+@rem     http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem Set Hadoop-specific environment variables here.
+
+@rem The only required environment variable is JAVA_HOME.  All others are
+@rem optional.  When running a distributed configuration it is best to
+@rem set JAVA_HOME in this file, so that it is correctly defined on
+@rem remote nodes.
+
+@rem The java implementation to use.  Required.
+set JAVA_HOME=%JAVA_HOME%
+
+@rem The jsvc implementation to use. Jsvc is required to run secure datanodes.
+@rem set JSVC_HOME=%JSVC_HOME%
+
+@rem set HADOOP_CONF_DIR=
+
+@rem Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
+if exist %HADOOP_HOME%\contrib\capacity-scheduler (
+  if not defined HADOOP_CLASSPATH (
+    set HADOOP_CLASSPATH=%HADOOP_HOME%\contrib\capacity-scheduler\*.jar
+  ) else (
+    set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%HADOOP_HOME%\contrib\capacity-scheduler\*.jar
+  )
+)
+
+@rem If TEZ_CLASSPATH is defined in the env, that means that TEZ is enabled
+@rem append it to the HADOOP_CLASSPATH
+
+if defined TEZ_CLASSPATH (
+  if not defined HADOOP_CLASSPATH (
+    set HADOOP_CLASSPATH=%TEZ_CLASSPATH%
+  ) else (
+    set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%TEZ_CLASSPATH%
+  )
+)
+
+@rem The maximum amount of heap to use, in MB. Default is 1000.
+@rem set HADOOP_HEAPSIZE=
+@rem set HADOOP_NAMENODE_INIT_HEAPSIZE=""
+
+@rem Extra Java runtime options.  Empty by default.
+@rem set HADOOP_OPTS=%HADOOP_OPTS% -Djava.net.preferIPv4Stack=true
+
+@rem Command specific options appended to HADOOP_OPTS when specified
+if not defined HADOOP_SECURITY_LOGGER (
+  set HADOOP_SECURITY_LOGGER=INFO,RFAS
+)
+if not defined HDFS_AUDIT_LOGGER (
+  set HDFS_AUDIT_LOGGER=INFO,NullAppender
+)
+
+set HADOOP_NAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_NAMENODE_OPTS%
+set HADOOP_DATANODE_OPTS=-Dhadoop.security.logger=ERROR,RFAS %HADOOP_DATANODE_OPTS%
+set HADOOP_SECONDARYNAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_SECONDARYNAMENODE_OPTS%
+
+@rem The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+set HADOOP_CLIENT_OPTS=-Xmx512m %HADOOP_CLIENT_OPTS%
+@rem set HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData %HADOOP_JAVA_PLATFORM_OPTS%"
+
+@rem On secure datanodes, user to run the datanode as after dropping privileges
+set HADOOP_SECURE_DN_USER=%HADOOP_SECURE_DN_USER%
+
+@rem Where log files are stored.  %HADOOP_HOME%/logs by default.
+@rem set HADOOP_LOG_DIR=%HADOOP_LOG_DIR%\%USERNAME%
+
+@rem Where log files are stored in the secure data environment.
+set HADOOP_SECURE_DN_LOG_DIR=%HADOOP_LOG_DIR%\%HADOOP_HDFS_USER%
+
+@rem The directory where pid files are stored. /tmp by default.
+@rem NOTE: this should be set to a directory that can only be written to by 
+@rem       the user that will run the hadoop daemons.  Otherwise there is the
+@rem       potential for a symlink attack.
+set HADOOP_PID_DIR=%HADOOP_PID_DIR%
+set HADOOP_SECURE_DN_PID_DIR=%HADOOP_PID_DIR%
+
+@rem A string representing this instance of hadoop. %USERNAME% by default.
+set HADOOP_IDENT_STRING=%USERNAME%

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.sh b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.sh
new file mode 100644
index 0000000..95511ed
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.sh
@@ -0,0 +1,110 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.
+export JAVA_HOME=/etc/alternatives/java_sdk
+
+# The jsvc implementation to use. Jsvc is required to run secure datanodes
+# that bind to privileged ports to provide authentication of data transfer
+# protocol.  Jsvc is not required if SASL is configured for authentication of
+# data transfer protocol using non-privileged ports.
+#export JSVC_HOME=${JSVC_HOME}
+
+#export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
+
+# Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
+#for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
+#  if [ "$HADOOP_CLASSPATH" ]; then
+#    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
+#  else
+#    export HADOOP_CLASSPATH=$f
+#  fi
+#done
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+#export HADOOP_HEAPSIZE=
+#export HADOOP_NAMENODE_INIT_HEAPSIZE=""
+
+# Setup environment variable for docker image
+if [ -f /etc/profile.d/hadoop.sh ]; then
+  . /etc/profile.d/hadoop.sh
+fi
+
+if [ -z "${NAMENODE}" ]; then
+  echo "environment variable NAMENODE is not set!"
+  exit 1
+fi
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Dhdfs.namenode=${NAMENODE}"
+#export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
+
+# Command specific options appended to HADOOP_OPTS when specified
+#export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
+#export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
+
+#export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
+
+#export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
+#export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+#export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
+#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
+
+# On secure datanodes, user to run the datanode as after dropping privileges.
+# This **MUST** be uncommented to enable secure HDFS if using privileged ports
+# to provide authentication of data transfer protocol.  This **MUST NOT** be
+# defined if SASL is configured for authentication of data transfer protocol
+# using non-privileged ports.
+#export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR=/var/log/hadoop
+export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
+
+# Where log files are stored in the secure data environment.
+#export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
+
+###
+# HDFS Mover specific parameters
+###
+# Specify the JVM options to be used when starting the HDFS Mover.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HADOOP_MOVER_OPTS=""
+
+###
+# Advanced Users Only!
+###
+
+# The directory where pid files are stored. /tmp by default.
+# NOTE: this should be set to a directory that can only be written to by
+#       the user that will run the hadoop daemons.  Otherwise there is the
+#       potential for a symlink attack.
+#export HADOOP_PID_DIR=${HADOOP_PID_DIR}
+#export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
+
+# A string representing this instance of hadoop. $USER by default.
+#export HADOOP_IDENT_STRING=$USER
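
Taken together with core-site.xml above, this shows how the cluster is wired up: the Makefile's run-datanode-container target passes `-e NAMENODE=<cluster-id>-namenode`, the check above refuses to proceed without that variable, and `-Dhdfs.namenode=${NAMENODE}` lets Hadoop's configuration expansion fill the `${hdfs.namenode}` placeholder in fs.defaultFS. One way to verify the substitution inside a running container (a sketch; `hdfs getconf` is a standard HDFS tool, not part of this commit):

```
echo $NAMENODE                        # e.g. centos6-namenode, injected via docker run -e
hdfs getconf -confKey fs.defaultFS    # expected to print hdfs://<NAMENODE>:8020
```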

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-metrics.properties
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-metrics.properties b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-metrics.properties
new file mode 100644
index 0000000..c1b2eb7
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-metrics.properties
@@ -0,0 +1,75 @@
+# Configuration of the "dfs" context for null
+dfs.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "dfs" context for file
+#dfs.class=org.apache.hadoop.metrics.file.FileContext
+#dfs.period=10
+#dfs.fileName=/tmp/dfsmetrics.log
+
+# Configuration of the "dfs" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# dfs.period=10
+# dfs.servers=localhost:8649
+
+
+# Configuration of the "mapred" context for null
+mapred.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "mapred" context for file
+#mapred.class=org.apache.hadoop.metrics.file.FileContext
+#mapred.period=10
+#mapred.fileName=/tmp/mrmetrics.log
+
+# Configuration of the "mapred" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# mapred.period=10
+# mapred.servers=localhost:8649
+
+
+# Configuration of the "jvm" context for null
+#jvm.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "jvm" context for file
+#jvm.class=org.apache.hadoop.metrics.file.FileContext
+#jvm.period=10
+#jvm.fileName=/tmp/jvmmetrics.log
+
+# Configuration of the "jvm" context for ganglia
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# jvm.period=10
+# jvm.servers=localhost:8649
+
+# Configuration of the "rpc" context for null
+rpc.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "rpc" context for file
+#rpc.class=org.apache.hadoop.metrics.file.FileContext
+#rpc.period=10
+#rpc.fileName=/tmp/rpcmetrics.log
+
+# Configuration of the "rpc" context for ganglia
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# rpc.period=10
+# rpc.servers=localhost:8649
+
+
+# Configuration of the "ugi" context for null
+ugi.class=org.apache.hadoop.metrics.spi.NullContext
+
+# Configuration of the "ugi" context for file
+#ugi.class=org.apache.hadoop.metrics.file.FileContext
+#ugi.period=10
+#ugi.fileName=/tmp/ugimetrics.log
+
+# Configuration of the "ugi" context for ganglia
+# ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+# ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+# ugi.period=10
+# ugi.servers=localhost:8649
+

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-metrics2.properties
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-metrics2.properties b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-metrics2.properties
new file mode 100644
index 0000000..0c09228
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-metrics2.properties
@@ -0,0 +1,68 @@
+# syntax: [prefix].[source|sink].[instance].[options]
+# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
+
+*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
+# default sampling period, in seconds
+*.period=10
+
+# The namenode-metrics.out will contain metrics from all context
+#namenode.sink.file.filename=namenode-metrics.out
+# Specifying a special sampling period for namenode:
+#namenode.sink.*.period=8
+
+#datanode.sink.file.filename=datanode-metrics.out
+
+#resourcemanager.sink.file.filename=resourcemanager-metrics.out
+
+#nodemanager.sink.file.filename=nodemanager-metrics.out
+
+#mrappmaster.sink.file.filename=mrappmaster-metrics.out
+
+#jobhistoryserver.sink.file.filename=jobhistoryserver-metrics.out
+
+# the following example split metrics of different
+# context to different sinks (in this case files)
+#nodemanager.sink.file_jvm.class=org.apache.hadoop.metrics2.sink.FileSink
+#nodemanager.sink.file_jvm.context=jvm
+#nodemanager.sink.file_jvm.filename=nodemanager-jvm-metrics.out
+#nodemanager.sink.file_mapred.class=org.apache.hadoop.metrics2.sink.FileSink
+#nodemanager.sink.file_mapred.context=mapred
+#nodemanager.sink.file_mapred.filename=nodemanager-mapred-metrics.out
+
+#
+# Below are for sending metrics to Ganglia
+#
+# for Ganglia 3.0 support
+# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink30
+#
+# for Ganglia 3.1 support
+# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+
+# *.sink.ganglia.period=10
+
+# default for supportsparse is false
+# *.sink.ganglia.supportsparse=true
+
+#*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+#*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Tag values to use for the ganglia prefix. If not defined no tags are used.
+# If '*' all tags are used. If specifiying multiple tags separate them with 
+# commas. Note that the last segment of the property name is the context name.
+#
+#*.sink.ganglia.tagsForPrefix.jvm=ProcesName
+#*.sink.ganglia.tagsForPrefix.dfs=
+#*.sink.ganglia.tagsForPrefix.rpc=
+#*.sink.ganglia.tagsForPrefix.mapred=
+
+#namenode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#datanode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#resourcemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#nodemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#mrappmaster.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#jobhistoryserver.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-policy.xml b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-policy.xml
new file mode 100644
index 0000000..2bf5c02
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-policy.xml
@@ -0,0 +1,226 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ 
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientProtocol, which is used by user code
+    via the DistributedFileSystem.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.client.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+    for block recovery.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for DatanodeProtocol, which is used by datanodes to
+    communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.inter.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
+    for updating generation timestamp.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.namenode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for NamenodeProtocol, the protocol used by the secondary
+    namenode to communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+ <property>
+    <name>security.admin.operations.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for AdminOperationsProtocol. Used for admin commands.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.refresh.user.mappings.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
+    users mappings. The ACL is a comma-separated list of user and
+    group names. The user and group list is separated by a blank. For
+    e.g. "alice,bob users,wheel".  A special value of "*" means all
+    users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.refresh.policy.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
+    dfsadmin and mradmin commands to refresh the security policy in-effect.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.ha.service.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HAService protocol used by HAAdmin to manage the
+      active and stand-by states of namenode.</description>
+  </property>
+
+  <property>
+    <name>security.zkfc.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for access to the ZK Failover Controller
+    </description>
+  </property>
+
+  <property>
+    <name>security.qjournal.service.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for QJournalProtocol, used by the NN to communicate with
+    JNs when using the QuorumJournalManager for edit logs.</description>
+  </property>
+
+  <property>
+    <name>security.mrhs.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HSClientProtocol, used by job clients to
+    communciate with the MR History Server job status etc. 
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <!-- YARN Protocols -->
+
+  <property>
+    <name>security.resourcetracker.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ResourceTrackerProtocol, used by the
+    ResourceManager and NodeManager to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.resourcemanager-administration.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ResourceManagerAdministrationProtocol, for admin commands. 
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.applicationclient.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ApplicationClientProtocol, used by the ResourceManager 
+    and applications submission clients to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.applicationmaster.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ApplicationMasterProtocol, used by the ResourceManager 
+    and ApplicationMasters to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.containermanagement.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ContainerManagementProtocol protocol, used by the NodeManager 
+    and ApplicationMasters to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.resourcelocalizer.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ResourceLocalizer protocol, used by the NodeManager 
+    and ResourceLocalizer to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.task.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
+    tasks to communicate with the parent tasktracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for MRClientProtocol, used by job clients to
+    communciate with the MR ApplicationMaster to query job status etc. 
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.applicationhistory.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ApplicationHistoryProtocol, used by the timeline
+    server and the generic history service client to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/hdfs-site.xml b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hdfs-site.xml
new file mode 100644
index 0000000..f565658
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hdfs-site.xml
@@ -0,0 +1,100 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+	<property>
+		<name>dfs.name.dir</name>
+		<value>/tmp/hdfs/name</value>
+		<final>true</final>
+	</property>
+
+	<property>
+		<name>dfs.data.dir</name>
+		<value>/tmp/hdfs/data</value>
+		<final>true</final>
+	</property>
+
+	<property>
+		<name>dfs.permissions</name>
+		<value>true</value>
+	</property>
+
+	<property>
+		<name>dfs.support.append</name>
+		<value>true</value>
+	</property>
+
+	<property>
+		<name>dfs.block.local-path-access.user</name>
+		<value>${user.name}</value>
+	</property>
+
+	<property>
+		<name>dfs.replication</name>
+		<value>3</value>
+	</property>
+
+	<property>
+		<name>dfs.datanode.socket.write.timeout</name>
+		<value>0</value>
+		<description>
+			used for sockets to and from datanodes. It is 8 minutes by default. Some
+			users set this to 0, effectively disabling the write timeout.
+		</description>
+	</property>
+
+	<property>
+		<name>dfs.webhdfs.enabled</name>
+		<value>true</value>
+	</property>
+
+	<property>
+		<name>dfs.allow.truncate</name>
+		<value>true</value>
+	</property>
+
+	<property>
+		<name>dfs.namenode.fs-limits.min-block-size</name>
+		<value>1024</value>
+	</property>
+
+	<property>
+		<name>dfs.client.read.shortcircuit</name>
+		<value>true</value>
+	</property>
+
+	<property>
+		<name>dfs.domain.socket.path</name>
+		<value>/var/lib/hadoop-hdfs/dn_socket</value>
+	</property>
+
+	<property>
+		<name>dfs.block.access.token.enable</name>
+		<value>true</value>
+		<description>
+			If "true", access tokens are used as capabilities for accessing
+			datanodes.
+			If "false", no access tokens are checked on accessing datanodes.
+		</description>
+	</property>
+
+	<property>
+		<name>dfs.namenode.datanode.registration.ip-hostname-check</name>
+		<value>false</value>
+	</property>
+</configuration>
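
HAWQ in particular relies on dfs.allow.truncate, short-circuit reads, and the domain socket path configured above. A quick sketch for confirming the effective values on a node (the keys are the ones set in this file):

    hdfs getconf -confKey dfs.allow.truncate
    hdfs getconf -confKey dfs.client.read.shortcircuit
    hdfs getconf -confKey dfs.domain.socket.path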

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-acls.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-acls.xml b/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-acls.xml
new file mode 100644
index 0000000..cba69f4
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-acls.xml
@@ -0,0 +1,135 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+
+  <!-- This file is hot-reloaded when it changes -->
+
+  <!-- KMS ACLs -->
+
+  <property>
+    <name>hadoop.kms.acl.CREATE</name>
+    <value>*</value>
+    <description>
+      ACL for create-key operations.
+      If the user is not in the GET ACL, the key material is not returned
+      as part of the response.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.DELETE</name>
+    <value>*</value>
+    <description>
+      ACL for delete-key operations.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.ROLLOVER</name>
+    <value>*</value>
+    <description>
+      ACL for rollover-key operations.
+      If the user is not in the GET ACL, the key material is not returned
+      as part of the response.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.GET</name>
+    <value>*</value>
+    <description>
+      ACL for get-key-version and get-current-key operations.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.GET_KEYS</name>
+    <value>*</value>
+    <description>
+      ACL for get-keys operations.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.GET_METADATA</name>
+    <value>*</value>
+    <description>
+      ACL for get-key-metadata and get-keys-metadata operations.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.SET_KEY_MATERIAL</name>
+    <value>*</value>
+    <description>
+      Complementary ACL for CREATE and ROLLOVER operations to allow the client
+      to provide the key material when creating or rolling a key.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.GENERATE_EEK</name>
+    <value>*</value>
+    <description>
+      ACL for generateEncryptedKey CryptoExtension operations.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.acl.DECRYPT_EEK</name>
+    <value>*</value>
+    <description>
+      ACL for decryptEncryptedKey CryptoExtension operations.
+    </description>
+  </property>
+
+  <property>
+    <name>default.key.acl.MANAGEMENT</name>
+    <value>*</value>
+    <description>
+      default ACL for MANAGEMENT operations for all key acls that are not
+      explicitly defined.
+    </description>
+  </property>
+
+  <property>
+    <name>default.key.acl.GENERATE_EEK</name>
+    <value>*</value>
+    <description>
+      default ACL for GENERATE_EEK operations for all key acls that are not
+      explicitly defined.
+    </description>
+  </property>
+
+  <property>
+    <name>default.key.acl.DECRYPT_EEK</name>
+    <value>*</value>
+    <description>
+      default ACL for DECRYPT_EEK operations for all key acls that are not
+      explicitly defined.
+    </description>
+  </property>
+
+  <property>
+    <name>default.key.acl.READ</name>
+    <value>*</value>
+    <description>
+      default ACL for READ operations for all key acls that are not
+      explicitly defined.
+    </description>
+  </property>
+
+
+</configuration>
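
With every ACL left at "*", any authenticated user may perform any key operation, which is convenient for this test image. A sketch of exercising these ACLs through the key shell, assuming a KMS listening on centos7-namenode:16000 (hostname and key name are examples):

    hadoop key create demokey -size 128 -provider kms://http@centos7-namenode:16000/kms
    hadoop key list -metadata -provider kms://http@centos7-namenode:16000/kms
    hadoop key roll demokey -provider kms://http@centos7-namenode:16000/kms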

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-env.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-env.sh b/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-env.sh
new file mode 100644
index 0000000..44dfe6a
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-env.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under the License is distributed on an "AS IS" BASIS,
+#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#  See the License for the specific language governing permissions and
+#  limitations under the License. See accompanying LICENSE file.
+#
+
+# Set kms specific environment variables here.
+
+# Settings for the Embedded Tomcat that runs KMS
+# Java System properties for KMS should be specified in this variable
+#
+# export CATALINA_OPTS=
+
+# KMS logs directory
+#
+# export KMS_LOG=${KMS_HOME}/logs
+
+# KMS temporary directory
+#
+# export KMS_TEMP=${KMS_HOME}/temp
+
+# The HTTP port used by KMS
+#
+# export KMS_HTTP_PORT=16000
+
+# The Admin port used by KMS
+#
+# export KMS_ADMIN_PORT=`expr ${KMS_HTTP_PORT} + 1`
+
+# The maximum number of Tomcat handler threads
+#
+# export KMS_MAX_THREADS=1000
+
+# The location of the SSL keystore if using SSL
+#
+# export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore
+
+# The password of the SSL keystore if using SSL
+#
+# export KMS_SSL_KEYSTORE_PASS=password
+
+# The full path to any native libraries that need to be loaded
+# (For eg. location of natively compiled tomcat Apache portable
+# runtime (APR) libraries
+#
+# export JAVA_LIBRARY_PATH=${HOME}/lib/native
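
Everything in this file is commented out, so the KMS falls back to its built-in defaults. To override a setting, export the variable before launching the KMS with the kms.sh launcher that ships with Hadoop (its location depends on the install); a sketch:

    export KMS_HTTP_PORT=16001
    export KMS_LOG=/var/log/kms
    kms.sh run    # or "kms.sh start" to daemonize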

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-log4j.properties
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-log4j.properties b/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-log4j.properties
new file mode 100644
index 0000000..8e6d909
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-log4j.properties
@@ -0,0 +1,38 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. See accompanying LICENSE file.
+#
+
+# If the Java System property 'kms.log.dir' is not defined at KMS start up time
+# Setup sets its value to '${kms.home}/logs'
+
+log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.kms.DatePattern='.'yyyy-MM-dd
+log4j.appender.kms.File=${kms.log.dir}/kms.log
+log4j.appender.kms.Append=true
+log4j.appender.kms.layout=org.apache.log4j.PatternLayout
+log4j.appender.kms.layout.ConversionPattern=%d{ISO8601} %-5p %c{1} - %m%n
+
+log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.kms-audit.DatePattern='.'yyyy-MM-dd
+log4j.appender.kms-audit.File=${kms.log.dir}/kms-audit.log
+log4j.appender.kms-audit.Append=true
+log4j.appender.kms-audit.layout=org.apache.log4j.PatternLayout
+log4j.appender.kms-audit.layout.ConversionPattern=%d{ISO8601} %m%n
+
+log4j.logger.kms-audit=INFO, kms-audit
+log4j.additivity.kms-audit=false
+
+log4j.rootLogger=ALL, kms
+log4j.logger.org.apache.hadoop.conf=ERROR
+log4j.logger.org.apache.hadoop=INFO
+log4j.logger.com.sun.jersey.server.wadl.generators.WadlGeneratorJAXBGrammarGenerator=OFF
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/440ce595/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-site.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-site.xml b/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-site.xml
new file mode 100644
index 0000000..a810ca4
--- /dev/null
+++ b/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-site.xml
@@ -0,0 +1,173 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+
+  <!-- KMS Backend KeyProvider -->
+
+  <property>
+    <name>hadoop.kms.key.provider.uri</name>
+    <value>jceks://file@/${user.home}/kms.keystore</value>
+    <description>
+      URI of the backing KeyProvider for the KMS.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.security.keystore.JavaKeyStoreProvider.password</name>
+    <value>none</value>
+    <description>
+      If using the JavaKeyStoreProvider, the password for the keystore file.
+    </description>
+  </property>
+
+  <!-- KMS Cache -->
+
+  <property>
+    <name>hadoop.kms.cache.enable</name>
+    <value>true</value>
+    <description>
+      Whether the KMS will act as a cache for the backing KeyProvider.
+      When the cache is enabled, operations like getKeyVersion, getMetadata,
+      and getCurrentKey will sometimes return cached data without consulting
+      the backing KeyProvider. Cached values are flushed when keys are deleted
+      or modified.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.cache.timeout.ms</name>
+    <value>600000</value>
+    <description>
+      Expiry time for the KMS key version and key metadata cache, in
+      milliseconds. This affects getKeyVersion and getMetadata.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.current.key.cache.timeout.ms</name>
+    <value>30000</value>
+    <description>
+      Expiry time for the KMS current key cache, in milliseconds. This
+      affects getCurrentKey operations.
+    </description>
+  </property>
+
+  <!-- KMS Audit -->
+
+  <property>
+    <name>hadoop.kms.audit.aggregation.window.ms</name>
+    <value>10000</value>
+    <description>
+      Duplicate audit log events within the aggregation window (specified in
+      ms) are quashed to reduce log traffic. A single message for aggregated
+      events is printed at the end of the window, along with a count of the
+      number of aggregated events.
+    </description>
+  </property>
+
+  <!-- KMS Security -->
+
+  <property>
+    <name>hadoop.kms.authentication.type</name>
+    <value>simple</value>
+    <description>
+      Authentication type for the KMS. Can be either &quot;simple&quot;
+      or &quot;kerberos&quot;.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.kerberos.keytab</name>
+    <value>${user.home}/kms.keytab</value>
+    <description>
+      Path to the keytab with credentials for the configured Kerberos principal.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.kerberos.principal</name>
+    <value>HTTP/localhost</value>
+    <description>
+      The Kerberos principal to use for the HTTP endpoint.
+      The principal must start with 'HTTP/' as per the Kerberos HTTP SPNEGO specification.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.kerberos.name.rules</name>
+    <value>DEFAULT</value>
+    <description>
+      Rules used to resolve Kerberos principal names.
+    </description>
+  </property>
+
+  <!-- Authentication cookie signature source -->
+
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider</name>
+    <value>random</value>
+    <description>
+      Indicates how the secret to sign the authentication cookies will be
+      stored. Options are 'random' (default), 'string' and 'zookeeper'.
+      If using a setup with multiple KMS instances, 'zookeeper' should be used.
+    </description>
+  </property>
+
+  <!-- Configuration for 'zookeeper' authentication cookie signature source -->
+
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.path</name>
+    <value>/hadoop-kms/hadoop-auth-signature-secret</value>
+    <description>
+      The Zookeeper ZNode path where the KMS instances will store and retrieve
+      the secret from.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string</name>
+    <value>#HOSTNAME#:#PORT#,...</value>
+    <description>
+      The Zookeeper connection string, a comma-separated list of host:port
+      pairs.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.auth.type</name>
+    <value>kerberos</value>
+    <description>
+      The Zookeeper authentication type, 'none' or 'sasl' (Kerberos).
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.keytab</name>
+    <value>/etc/hadoop/conf/kms.keytab</value>
+    <description>
+      The absolute path for the Kerberos keytab with the credentials to
+      connect to Zookeeper.
+    </description>
+  </property>
+
+  <property>
+    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.principal</name>
+    <value>kms/#HOSTNAME#</value>
+    <description>
+      The Kerberos service principal used to connect to Zookeeper.
+    </description>
+  </property>
+
+</configuration>
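
This file only configures the server side; with the "simple" authentication type above, a client can reach the KMS REST API by passing its user name as a query parameter. A sketch for checking that the KMS is up (hostname and user are examples):

    curl "http://centos7-namenode:16000/kms/v1/keys/names?user.name=gpadmin"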


[22/50] [abbrv] incubator-hawq git commit: Revert "HAWQ-1248. Merge Dockerfiles for HAWQ Dev into HAWQ code base."

Posted by es...@apache.org.
Revert "HAWQ-1248. Merge Dockerfiles for HAWQ Dev into HAWQ code base."

This reverts commit 440ce595a2298ac9be16973f0c7c4c358ddb2cd0.

Reason for revert: RAT check fails with unapproved licenses in some of the added files


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/1cb29096
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/1cb29096
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/1cb29096

Branch: refs/heads/2.1.0.0-incubating
Commit: 1cb29096ca89f4ed6f54ab795dfad518131969f2
Parents: 440ce59
Author: Ruilong Huo <rh...@pivotal.io>
Authored: Fri Jan 13 19:38:13 2017 +0800
Committer: Ruilong Huo <rh...@pivotal.io>
Committed: Sat Jan 14 11:40:11 2017 +0800

----------------------------------------------------------------------
 contrib/hawq-docker/Makefile                    | 222 --------------
 contrib/hawq-docker/README.md                   |  97 -------
 .../centos6-docker/hawq-dev/Dockerfile          | 123 --------
 .../centos6-docker/hawq-test/Dockerfile         |  40 ---
 .../hawq-test/conf/capacity-scheduler.xml       | 134 ---------
 .../hawq-test/conf/configuration.xsl            |  40 ---
 .../hawq-test/conf/container-executor.cfg       |   4 -
 .../centos6-docker/hawq-test/conf/core-site.xml |  24 --
 .../hawq-test/conf/hadoop-env.cmd               |  92 ------
 .../centos6-docker/hawq-test/conf/hadoop-env.sh | 110 -------
 .../hawq-test/conf/hadoop-metrics.properties    |  75 -----
 .../hawq-test/conf/hadoop-metrics2.properties   |  68 -----
 .../hawq-test/conf/hadoop-policy.xml            | 226 --------------
 .../centos6-docker/hawq-test/conf/hdfs-site.xml | 100 -------
 .../centos6-docker/hawq-test/conf/kms-acls.xml  | 135 ---------
 .../centos6-docker/hawq-test/conf/kms-env.sh    |  55 ----
 .../hawq-test/conf/kms-log4j.properties         |  38 ---
 .../centos6-docker/hawq-test/conf/kms-site.xml  | 173 -----------
 .../hawq-test/conf/log4j.properties             | 291 -------------------
 .../hawq-test/conf/mapred-env.cmd               |  20 --
 .../centos6-docker/hawq-test/conf/mapred-env.sh |  27 --
 .../hawq-test/conf/mapred-queues.xml.template   |  92 ------
 .../hawq-test/conf/mapred-site.xml.template     |  21 --
 .../centos6-docker/hawq-test/conf/slaves        |   1 -
 .../hawq-test/conf/ssl-client.xml.example       |  80 -----
 .../hawq-test/conf/ssl-server.xml.example       |  78 -----
 .../centos6-docker/hawq-test/conf/yarn-env.cmd  |  60 ----
 .../centos6-docker/hawq-test/entrypoint.sh      |  34 ---
 .../centos6-docker/hawq-test/start-hdfs.sh      |  39 ---
 .../centos7-docker/hawq-dev/Dockerfile          |  75 -----
 .../centos7-docker/hawq-test/Dockerfile         |  40 ---
 .../hawq-test/conf/capacity-scheduler.xml       | 134 ---------
 .../hawq-test/conf/configuration.xsl            |  40 ---
 .../hawq-test/conf/container-executor.cfg       |   4 -
 .../centos7-docker/hawq-test/conf/core-site.xml |  24 --
 .../hawq-test/conf/hadoop-env.cmd               |  92 ------
 .../centos7-docker/hawq-test/conf/hadoop-env.sh | 110 -------
 .../hawq-test/conf/hadoop-metrics.properties    |  75 -----
 .../hawq-test/conf/hadoop-metrics2.properties   |  68 -----
 .../hawq-test/conf/hadoop-policy.xml            | 226 --------------
 .../centos7-docker/hawq-test/conf/hdfs-site.xml | 100 -------
 .../centos7-docker/hawq-test/conf/kms-acls.xml  | 135 ---------
 .../centos7-docker/hawq-test/conf/kms-env.sh    |  55 ----
 .../hawq-test/conf/kms-log4j.properties         |  38 ---
 .../centos7-docker/hawq-test/conf/kms-site.xml  | 173 -----------
 .../hawq-test/conf/log4j.properties             | 291 -------------------
 .../hawq-test/conf/mapred-env.cmd               |  20 --
 .../centos7-docker/hawq-test/conf/mapred-env.sh |  27 --
 .../hawq-test/conf/mapred-queues.xml.template   |  92 ------
 .../hawq-test/conf/mapred-site.xml.template     |  21 --
 .../centos7-docker/hawq-test/conf/slaves        |   1 -
 .../hawq-test/conf/ssl-client.xml.example       |  80 -----
 .../hawq-test/conf/ssl-server.xml.example       |  78 -----
 .../centos7-docker/hawq-test/conf/yarn-env.cmd  |  60 ----
 .../centos7-docker/hawq-test/entrypoint.sh      |  33 ---
 .../centos7-docker/hawq-test/start-hdfs.sh      |  39 ---
 56 files changed, 4630 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/Makefile
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/Makefile b/contrib/hawq-docker/Makefile
deleted file mode 100644
index 120ebe2..0000000
--- a/contrib/hawq-docker/Makefile
+++ /dev/null
@@ -1,222 +0,0 @@
-#!/usr/bin/make all
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-THIS_MAKEFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
-TOP_DIR := $(abspath $(dir ${THIS_MAKEFILE_PATH}))
-NDATANODES := 3
-CUR_DATANODE := 1
-OS_VERSION := centos7
-# Do not use underscore "_" in CLUSTER_ID
-CLUSTER_ID := $(OS_VERSION)
-# Mount this local directory to /data in the data container and share it with other containers
-LOCAL := 
-# networks used in docker
-NETWORK := $(CLUSTER_ID)_hawq_network
-
-all: 
-	@echo " Usage:"
-	@echo "    To setup a build and test environment:         make run"
-	@echo "    To start all containers:                       make start"
-	@echo "    To stop all containers:                        make stop"
-	@echo "    To remove hdfs containers:                     make clean"
-	@echo "    To remove all containers:                      make distclean"
-	@echo ""
-	@echo "    To build images locally:                       make build"
-	@echo "    To pull latest images:                         make pull"
-
-build:
-	@make -f $(THIS_MAKEFILE_PATH) build-hawq-dev-$(OS_VERSION)
-	@make -f $(THIS_MAKEFILE_PATH) build-hawq-test-$(OS_VERSION)
-	@echo "Build Images Done!"
-
-build-hawq-dev-$(OS_VERSION): $(TOP_DIR)/$(OS_VERSION)-docker/hawq-dev/Dockerfile
-	@echo build hawq-dev:$(OS_VERSION) image
-	docker build -t hawq/hawq-dev:$(OS_VERSION) $(TOP_DIR)/$(OS_VERSION)-docker/hawq-dev/
-
-build-hawq-test-$(OS_VERSION): $(TOP_DIR)/$(OS_VERSION)-docker/hawq-test/Dockerfile
-	@echo build hawq-test:$(OS_VERSION) image
-	docker build -t hawq/hawq-test:$(OS_VERSION) $(TOP_DIR)/$(OS_VERSION)-docker/hawq-test/
-
-create-data-container:
-	@echo create ${CLUSTER_ID}-data container
-	@if [ ! -z "$(LOCAL)" -a ! -d "$(LOCAL)" ]; then \
-		echo "LOCAL must be set to a directory!"; \
-		exit 1; \
-	fi
-	@if [ -z "`docker ps -a --filter="name=${CLUSTER_ID}-data$$" | grep -v CONTAINER`" ]; then \
-		if [ -z "$(LOCAL)" ]; then \
-			docker create -v /data --name=${CLUSTER_ID}-data hawq/hawq-dev:$(OS_VERSION) /bin/true; \
-		else \
-			docker create -v $(LOCAL):/data --name=${CLUSTER_ID}-data hawq/hawq-dev:$(OS_VERSION) /bin/true; \
-		fi \
-	else \
-		echo "${CLUSTER_ID}-data container already exist!"; \
-	fi
-
-run:
-	@if [ -z "`docker network ls 2>/dev/null`" ]; then \
- 		make -f $(THIS_MAKEFILE_PATH) NETWORK=default create-data-container && \
-		make -f $(THIS_MAKEFILE_PATH) NETWORK=default run-hdfs; \
-	else \
-		if [ -z "`docker network ls 2>/dev/null | grep $(NETWORK)`" ]; then \
-			echo create network $(NETWORK) && \
-			docker network create --driver bridge $(NETWORK); \
-		fi && \
-		make -f $(THIS_MAKEFILE_PATH) create-data-container && \
-		make -f $(THIS_MAKEFILE_PATH) run-hdfs; \
-	fi
-
-run-hdfs:
-	@make -f $(THIS_MAKEFILE_PATH) run-namenode-container
-	@i=1; \
-	while [ $$i -le $(NDATANODES) ] ; do \
-		make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i run-datanode-container; \
-		i=$$((i+1)); \
-	done
-	@echo "HAWQ Environment Setup Done!"
-	@echo 'run "docker exec -it ${CLUSTER_ID}-namenode bash" to attach to ${CLUSTER_ID}-namenode node'
-
-run-namenode-container:
-	@echo "run ${CLUSTER_ID}-namenode container"
-	@if [ -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
-		docker run --privileged -itd --net=$(NETWORK) --hostname=${CLUSTER_ID}-namenode --name=${CLUSTER_ID}-namenode \
-			--volumes-from ${CLUSTER_ID}-data hawq/hawq-test:$(OS_VERSION); \
-	else \
-		echo "${CLUSTER_ID}-namenode container already exist!"; \
-	fi
-
-run-datanode-container:
-	@echo "run ${CLUSTER_ID}-datanode$(CUR_DATANODE) container"
-	@if [ -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \
-		docker run --privileged -itd --net=$(NETWORK) --hostname=${CLUSTER_ID}-datanode$(CUR_DATANODE) \
-			--name=${CLUSTER_ID}-datanode$(CUR_DATANODE) -e NAMENODE=${CLUSTER_ID}-namenode \
-			--volumes-from ${CLUSTER_ID}-data hawq/hawq-test:$(OS_VERSION); \
-	else \
-		echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container already exist!"; \
-	fi
-
-start:
-	@make -f $(THIS_MAKEFILE_PATH) start-hdfs
-	@echo 'run "docker exec -it ${CLUSTER_ID}-namenode bash" to attach to ${CLUSTER_ID}-namenode node'
-
-start-hdfs:
-	@make -f $(THIS_MAKEFILE_PATH) start-namenode-container
-	@i=1;\
-	while [ $$i -le $(NDATANODES) ] ; do \
-		make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i start-datanode-container; \
-		i=$$((i+1)); \
-	done
-	@echo "Start All Containers Done!"
-
-start-namenode-container:
-	@echo "start ${CLUSTER_ID}-namenode container"
-	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
-		docker start ${CLUSTER_ID}-namenode; \
-	else \
-		echo "${CLUSTER_ID}-namenode container does not exist!" && exit 1; \
-	fi
-
-start-datanode-container:
-	@echo "start ${CLUSTER_ID}-datanode$(CUR_DATANODE) container"
-	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \
-		docker start ${CLUSTER_ID}-datanode$(CUR_DATANODE); \
-	else \
-		echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container does not exist!" && exit 1; \
-	fi
-
-stop:
-	@make -f $(THIS_MAKEFILE_PATH) stop-hdfs
-
-stop-hdfs:
-	@make -f $(THIS_MAKEFILE_PATH) stop-namenode-container
-	@i=1; \
-	while [ $$i -le $(NDATANODES) ] ; do \
-		make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i stop-datanode-container; \
-		i=$$((i+1)); \
-	done
-	@echo "Stop All Containers Done!"
-
-stop-namenode-container:
-	@echo "stop ${CLUSTER_ID}-namenode container"
-	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
-		docker stop -t 0 ${CLUSTER_ID}-namenode; \
-	else \
-		echo "${CLUSTER_ID}-namenode container does not exist!"; \
-	fi
-
-stop-datanode-container:
-	@echo "stop ${CLUSTER_ID}-datanode$(CUR_DATANODE) container"
-	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \
-		docker stop -t 0 ${CLUSTER_ID}-datanode$(CUR_DATANODE); \
-	else \
-		echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container does not exist!"; \
-	fi
-
-remove-hdfs:
-	@make -f $(THIS_MAKEFILE_PATH) remove-namenode-container
-	@i=1; \
-	while [ $$i -le $(NDATANODES) ] ; do \
-		make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i remove-datanode-container; \
-		i=$$((i+1)); \
-	done
-	@echo "Remove HDFS Done!"
-
-remove-namenode-container:
-	@echo "make ${CLUSTER_ID}-namenode container"
-	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \
-		docker rm -v ${CLUSTER_ID}-namenode; \
-	else \
-		echo "${CLUSTER_ID}-namenode container does not exist!"; \
-	fi
-
-remove-datanode-container:
-	@echo "make ${CLUSTER_ID}-datanode$(CUR_DATANODE) container"
-	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \
-		docker rm -v ${CLUSTER_ID}-datanode$(CUR_DATANODE); \
-	else \
-		echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container does not exist!"; \
-	fi
-
-remove-data:
-	@echo remove ${CLUSTER_ID}-data container
-	@if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-data" | grep -v CONTAINER`" ]; then \
-		docker rm -v ${CLUSTER_ID}-data; \
-	else \
-		echo "${CLUSTER_ID}-data container does not exist!"; \
-	fi
-
-pull:
-	@echo latest images
-	docker pull hawq/hawq-dev:$(OS_VERSION)
-	docker pull hawq/hawq-test:$(OS_VERSION)
-
-clean:
-	@make -f $(THIS_MAKEFILE_PATH) stop 2>&1 >/dev/null || true
-	@make -f $(THIS_MAKEFILE_PATH) remove-hdfs 2>&1 >/dev/null || true
-	@echo "Clean Done!"
-
-distclean:
-	@make -f $(THIS_MAKEFILE_PATH) stop 2>&1 >/dev/null || true
-	@make -f $(THIS_MAKEFILE_PATH) remove-hdfs 2>&1 >/dev/null || true
-	@make -f $(THIS_MAKEFILE_PATH) remove-data 2>&1 >/dev/null || true
-	@if [ ! -z "`docker network ls 2>/dev/null | grep $(NETWORK)`" ]; then \
-		echo remove network $(NETWORK); \
-		docker network rm $(NETWORK) 2>&1 >/dev/null || true; \
-	fi
-	@echo "Distclean Done!"

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/README.md
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/README.md b/contrib/hawq-docker/README.md
deleted file mode 100644
index 4adeaaf..0000000
--- a/contrib/hawq-docker/README.md
+++ /dev/null
@@ -1,97 +0,0 @@
-# hawq-docker
-
-hawq-docker is based on *wangzw's* repo *hawq-devel-env*. It provides the docker images and scripts that help developers of Apache HAWQ set up a build and test environment with docker.
-
-Both CentOS 7 and CentOS 6 are supported.
-Change variable **OS_VERSION** (:= centos7 OR centos6) in Makefile to switch between CentOS 7 and CentOS 6.
-
-Take CentOS 7 as an example below.
-
-# Install docker
-* follow the instructions to install docker:
-https://docs.docker.com/
-
-# Setup build and test environment
-* clone hawq repository
-```
-git clone https://github.com/apache/incubator-hawq.git .
-cd incubator-hawq/contrib/hawq-docker
-```
-* Get the docker images
-```
-  make pull (recommended)
-OR
-  make build
-``` 
-(`make pull` downloads prebuilt docker images from Docker Hub, while `make build` builds the images locally. In general, `make pull` is faster than `make build`.)
-* set up a 5-node virtual cluster for Apache HAWQ build and test.
-```
-make run
-```
-Now let's have a look at what we created.
-```
-[root@localhost hawq-docker]# docker ps -a
-CONTAINER ID        IMAGE                          COMMAND                CREATED             STATUS              PORTS               NAMES
-382b2b3360d1        hawq/hawq-test:centos7   "entrypoint.sh bash"   2 minutes ago       Up 2 minutes                            centos7-datanode3
-86513c331d45        hawq/hawq-test:centos7   "entrypoint.sh bash"   2 minutes ago       Up 2 minutes                            centos7-datanode2
-c0ab10e46e4a        hawq/hawq-test:centos7   "entrypoint.sh bash"   2 minutes ago       Up 2 minutes                            centos7-datanode1
-e27beea63953        hawq/hawq-test:centos7   "entrypoint.sh bash"   2 minutes ago       Up 2 minutes                            centos7-namenode
-1f986959bd04        hawq/hawq-dev:centos7    "/bin/true"            2 minutes ago       Created                                 centos7-data
-```
-**centos7-data** is a data container mounted at the /data directory on all other containers to provide shared storage for the cluster.
-
-# Build and Test Apache HAWQ
-* attach to namenode
-```
-docker exec -it centos7-namenode bash
-```
-* check that HDFS is working well
-```
-sudo -u hdfs hdfs dfsadmin -report
-```
-* clone Apache HAWQ code to the /data directory
-```
-git clone https://github.com/apache/incubator-hawq.git /data/hawq
-```
-* build Apache HAWQ
-```
-cd /data/hawq
-./configure --prefix=/data/hawq-dev
-make
-make install
-```
-(When using CentOS 6, run `scl enable devtoolset-2 bash` before configuring
-hawq and run `exit` after installing hawq.)
-* modify Apache HAWQ configuration
-```
-sed 's|localhost|centos7-namenode|g' -i /data/hawq-dev/etc/hawq-site.xml
-echo 'centos7-datanode1' >  /data/hawq-dev/etc/slaves
-echo 'centos7-datanode2' >>  /data/hawq-dev/etc/slaves
-echo 'centos7-datanode3' >>  /data/hawq-dev/etc/slaves
-```
-* Initialize Apache HAWQ cluster
-```
-sudo -u hdfs hdfs dfs -chown gpadmin /
-source /data/hawq-dev/greenplum_path.sh
-hawq init cluster
-```
-Now you can connect to database with `psql` command.
-```
-[gpadmin@centos7-namenode data]$ psql -d postgres
-psql (8.2.15)
-Type "help" for help.
-
-postgres=# 
-```
-# More commands with this script
-```
- Usage:
-    To setup a build and test environment:         make run
-    To start all containers:                       make start
-    To stop all containers:                        make stop
-    To remove hdfs containers:                     make clean
-    To remove all containers:                      make distclean
-    To build images locally:                       make build
-    To pull latest images:                         make pull
-```
-

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-dev/Dockerfile
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-dev/Dockerfile b/contrib/hawq-docker/centos6-docker/hawq-dev/Dockerfile
deleted file mode 100644
index 9fb8476..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-dev/Dockerfile
+++ /dev/null
@@ -1,123 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-FROM centos:6
-
-MAINTAINER Richard Guo <ri...@pivotal.io>
-
-# install all software we need
-RUN yum install -y epel-release && \
- yum makecache && \
- yum install -y man passwd sudo tar which git mlocate links make bzip2 \
- autoconf automake libtool m4 gcc gcc-c++ gdb flex cmake gperf indent \
- libuuid-devel krb5-devel libgsasl-devel expat-devel libxml2-devel \
- perl-ExtUtils-Embed pam-devel python-devel snappy-devel \
- libyaml-devel libevent-devel bzip2-devel openssl-devel \
- openldap-devel readline-devel net-snmp-devel apr-devel \
- libesmtp-devel xerces-c-devel python-pip json-c-devel \
- apache-ivy java-1.7.0-openjdk-devel wget \
- openssh-clients openssh-server perl-JSON && \
- yum clean all
-
-# update gcc
-RUN wget -O /etc/yum.repos.d/slc6-devtoolset.repo http://linuxsoft.cern.ch/cern/devtoolset/slc6-devtoolset.repo && \
- rpm --import http://ftp.scientificlinux.org/linux/scientific/5x/x86_64/RPM-GPG-KEYs/RPM-GPG-KEY-cern && \
- yum install -y devtoolset-2-gcc devtoolset-2-binutils devtoolset-2-gcc-c++ && \
- echo "source /opt/rh/devtoolset-2/enable" >> ~/.bashrc && \
- source ~/.bashrc
-
-# install libcurl 7.45.0
-RUN mkdir -p /tmp/build/ && \
- cd /tmp/build && curl -L "http://curl.haxx.se/download/curl-7.45.0.tar.bz2" -o curl-7.45.0.tar.bz2 && \
- tar -xjf curl-7.45.0.tar.bz2 && cd curl-7.45.0 && \
- ./configure --prefix=/usr && make && make install && \
- rm -rf /tmp/build && ldconfig
-
-# install maven
-RUN curl -L "http://repos.fedorapeople.org/repos/dchen/apache-maven/epel-apache-maven.repo" -o /etc/yum.repos.d/epel-apache-maven.repo && \
- yum install -y apache-maven && \
- yum clean all
-
-# OS requirements
-RUN echo "kernel.sem = 250 512000 100 2048" >> /etc/sysctl.conf
-
-# setup ssh server and keys for root
-RUN ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa && \
- cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys && \
- chmod 0600 ~/.ssh/authorized_keys
-
-# setup JAVA_HOME for all users
-RUN echo "#!/bin/sh" > /etc/profile.d/java.sh && \
- echo "export JAVA_HOME=/etc/alternatives/java_sdk" >> /etc/profile.d/java.sh && \
- chmod a+x /etc/profile.d/java.sh
-
-# install boost 1.59
- RUN mkdir -p /tmp/build && \
-  cd /tmp/build && curl -L "http://downloads.sourceforge.net/project/boost/boost/1.59.0/boost_1_59_0.tar.bz2" -o boost_1_59_0.tar.bz2 && \
-  tar -xjf boost_1_59_0.tar.bz2 && cd boost_1_59_0 && \
-  ./bootstrap.sh && ./b2 --prefix=/usr -q && ./b2 --prefix=/usr -q install && \
-  rm -rf /tmp/build
-
-# install bison 2.5.1
-RUN mkdir -p /tmp/build/ && \
- cd /tmp/build && curl -L "ftp://ftp.gnu.org/gnu/bison/bison-2.5.1.tar.gz" -o bison-2.5.1.tar.gz && \
- tar -xzf bison-2.5.1.tar.gz && cd bison-2.5.1 && \
- ./configure --prefix=/usr && make && make install && \
- rm -rf /tmp/build
-
-# install thrift 0.9.1
-RUN mkdir -p /tmp/build && \
- cd /tmp/build && curl -L "https://archive.apache.org/dist/thrift/0.9.1/thrift-0.9.1.tar.gz" -o thrift-0.9.1.tar.gz && \
- tar -xf thrift-0.9.1.tar.gz && cd thrift-0.9.1 && \
- ./configure --prefix=/usr --without-tests && \
- make && make install && \
- rm -rf /tmp/build
-
-# install protobuf 2.5.0
-RUN mkdir -p /tmp/build/ && \
- cd /tmp/build && curl -L "https://github.com/google/protobuf/releases/download/v2.5.0/protobuf-2.5.0.tar.bz2" -o protobuf-2.5.0.tar.bz2 && \
- tar -xjf protobuf-2.5.0.tar.bz2 && cd protobuf-2.5.0 && \
- ./configure --prefix=/usr && make && make install && ldconfig && \
- rm -rf /tmp/build
-
-# install python module 
-RUN pip --retries=50 --timeout=300 install pycrypto
-
-# create user gpadmin since HAWQ cannot run under root
-RUN groupadd -g 1000 gpadmin && \
- useradd -u 1000 -g 1000 gpadmin && \
- echo "gpadmin  ALL=(ALL)       NOPASSWD: ALL" > /etc/sudoers.d/gpadmin
-
-# sudo should not require tty
-RUN sed -i -e 's|Defaults    requiretty|#Defaults    requiretty|' /etc/sudoers
-
-RUN echo "#!/bin/bash" > /etc/profile.d/user.sh && \
- echo "export USER=\`whoami\`" >> /etc/profile.d/user.sh && \
- chmod a+x /etc/profile.d/user.sh
-
-ENV BASEDIR /data
-RUN mkdir -p /data && chmod 777 /data
-
-USER gpadmin
-
-# setup ssh client keys for gpadmin
-RUN ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa && \
- cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys && \
- chmod 0600 ~/.ssh/authorized_keys
-
-WORKDIR /data
-

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-test/Dockerfile
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/Dockerfile b/contrib/hawq-docker/centos6-docker/hawq-test/Dockerfile
deleted file mode 100644
index 94a04fe..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-test/Dockerfile
+++ /dev/null
@@ -1,40 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-FROM hawq/hawq-dev:centos6
-
-MAINTAINER Richard Guo <ri...@pivotal.io>
-
-USER root
-
-# install HDP 2.5.0
-RUN curl -L "http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.5.0.0/hdp.repo" -o /etc/yum.repos.d/hdp.repo && \
- yum install -y hadoop hadoop-hdfs hadoop-libhdfs hadoop-yarn hadoop-mapreduce hadoop-client hdp-select && \
- yum clean all
-
-RUN ln -s /usr/hdp/current/hadoop-hdfs-namenode/../hadoop/sbin/hadoop-daemon.sh /usr/bin/hadoop-daemon.sh
-
-COPY conf/* /etc/hadoop/conf/
-
-COPY entrypoint.sh /usr/bin/entrypoint.sh
-COPY start-hdfs.sh /usr/bin/start-hdfs.sh
-
-USER gpadmin
-
-ENTRYPOINT ["entrypoint.sh"]
-CMD ["bash"]
-

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-test/conf/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/capacity-scheduler.xml b/contrib/hawq-docker/centos6-docker/hawq-test/conf/capacity-scheduler.xml
deleted file mode 100644
index 30f4eb9..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-test/conf/capacity-scheduler.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<configuration>
-
-  <property>
-    <name>yarn.scheduler.capacity.maximum-applications</name>
-    <value>10000</value>
-    <description>
-      Maximum number of applications that can be pending and running.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
-    <value>0.1</value>
-    <description>
-      Maximum percent of resources in the cluster which can be used to run 
-      application masters, i.e. it controls the number of concurrently running
-      applications.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.resource-calculator</name>
-    <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
-    <description>
-      The ResourceCalculator implementation to be used to compare 
-      Resources in the scheduler.
-      The default, DefaultResourceCalculator, only uses Memory, while
-      DominantResourceCalculator uses dominant-resource to compare
-      multi-dimensional resources such as Memory, CPU, etc.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.queues</name>
-    <value>default</value>
-    <description>
-      The queues at this level (root is the root queue).
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.capacity</name>
-    <value>100</value>
-    <description>Default queue target capacity.</description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
-    <value>1</value>
-    <description>
-      Default queue user limit, a percentage from 0.0 to 1.0.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
-    <value>100</value>
-    <description>
-      The maximum capacity of the default queue. 
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.state</name>
-    <value>RUNNING</value>
-    <description>
-      The state of the default queue. State can be one of RUNNING or STOPPED.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
-    <value>*</value>
-    <description>
-      The ACL of who can submit jobs to the default queue.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
-    <value>*</value>
-    <description>
-      The ACL of who can administer jobs on the default queue.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.node-locality-delay</name>
-    <value>40</value>
-    <description>
-      Number of missed scheduling opportunities after which the CapacityScheduler 
-      attempts to schedule rack-local containers. 
-      Typically this should be set to the number of nodes in the cluster; by default it is set
-      to approximately the number of nodes in one rack, which is 40.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.queue-mappings</name>
-    <value></value>
-    <description>
-      A list of mappings that will be used to assign jobs to queues.
-      The syntax for this list is [u|g]:[name]:[queue_name][,next mapping]*
-      Typically this list will be used to map users to queues,
-      for example, u:%user:%user maps all users to queues with the same name
-      as the user.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.queue-mappings-override.enable</name>
-    <value>false</value>
-    <description>
-      If a queue mapping is present, will it override the value specified
-      by the user? This can be used by administrators to place jobs in queues
-      that are different than the one specified by the user.
-      The default is false.
-    </description>
-  </property>
-
-</configuration>
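
Changes to the queue definitions or mappings in this file do not require a ResourceManager restart; a sketch of reloading them in place:

    yarn rmadmin -refreshQueues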

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-test/conf/configuration.xsl
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/configuration.xsl b/contrib/hawq-docker/centos6-docker/hawq-test/conf/configuration.xsl
deleted file mode 100644
index d50d80b..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-test/conf/configuration.xsl
+++ /dev/null
@@ -1,40 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-<xsl:output method="html"/>
-<xsl:template match="configuration">
-<html>
-<body>
-<table border="1">
-<tr>
- <td>name</td>
- <td>value</td>
- <td>description</td>
-</tr>
-<xsl:for-each select="property">
-<tr>
-  <td><a name="{name}"><xsl:value-of select="name"/></a></td>
-  <td><xsl:value-of select="value"/></td>
-  <td><xsl:value-of select="description"/></td>
-</tr>
-</xsl:for-each>
-</table>
-</body>
-</html>
-</xsl:template>
-</xsl:stylesheet>

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-test/conf/container-executor.cfg
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/container-executor.cfg b/contrib/hawq-docker/centos6-docker/hawq-test/conf/container-executor.cfg
deleted file mode 100644
index d68cee8..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-test/conf/container-executor.cfg
+++ /dev/null
@@ -1,4 +0,0 @@
-yarn.nodemanager.linux-container-executor.group=#configured value of yarn.nodemanager.linux-container-executor.group
-banned.users=#comma separated list of users who can not run applications
-min.user.id=1000#Prevent other super-users
-allowed.system.users=##comma separated list of system users who CAN run applications

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-test/conf/core-site.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/core-site.xml b/contrib/hawq-docker/centos6-docker/hawq-test/conf/core-site.xml
deleted file mode 100644
index afc37fc..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-test/conf/core-site.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-	<property>
-		<name>fs.defaultFS</name>
-		<value>hdfs://${hdfs.namenode}:8020</value>
-	</property>
-</configuration>
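
fs.defaultFS is left parameterized on the ${hdfs.namenode} system property; the hadoop-env.sh in the same conf directory (shown further down in this listing) injects it through HADOOP_OPTS from the NAMENODE environment variable set on each container. A sketch of how the placeholder resolves (hostname is an example):

    export NAMENODE=centos7-namenode
    export HADOOP_OPTS="-Dhdfs.namenode=${NAMENODE}"
    hdfs getconf -confKey fs.defaultFS    # expected: hdfs://centos7-namenode:8020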

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.cmd
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.cmd b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.cmd
deleted file mode 100644
index bb40ec9..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.cmd
+++ /dev/null
@@ -1,92 +0,0 @@
-@echo off
-@rem Licensed to the Apache Software Foundation (ASF) under one or more
-@rem contributor license agreements.  See the NOTICE file distributed with
-@rem this work for additional information regarding copyright ownership.
-@rem The ASF licenses this file to You under the Apache License, Version 2.0
-@rem (the "License"); you may not use this file except in compliance with
-@rem the License.  You may obtain a copy of the License at
-@rem
-@rem     http://www.apache.org/licenses/LICENSE-2.0
-@rem
-@rem Unless required by applicable law or agreed to in writing, software
-@rem distributed under the License is distributed on an "AS IS" BASIS,
-@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-@rem See the License for the specific language governing permissions and
-@rem limitations under the License.
-
-@rem Set Hadoop-specific environment variables here.
-
-@rem The only required environment variable is JAVA_HOME.  All others are
-@rem optional.  When running a distributed configuration it is best to
-@rem set JAVA_HOME in this file, so that it is correctly defined on
-@rem remote nodes.
-
-@rem The java implementation to use.  Required.
-set JAVA_HOME=%JAVA_HOME%
-
-@rem The jsvc implementation to use. Jsvc is required to run secure datanodes.
-@rem set JSVC_HOME=%JSVC_HOME%
-
-@rem set HADOOP_CONF_DIR=
-
-@rem Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
-if exist %HADOOP_HOME%\contrib\capacity-scheduler (
-  if not defined HADOOP_CLASSPATH (
-    set HADOOP_CLASSPATH=%HADOOP_HOME%\contrib\capacity-scheduler\*.jar
-  ) else (
-    set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%HADOOP_HOME%\contrib\capacity-scheduler\*.jar
-  )
-)
-
-@rem If TEZ_CLASSPATH is defined in the env, that means that TEZ is enabled
-@rem append it to the HADOOP_CLASSPATH
-
-if defined TEZ_CLASSPATH (
-  if not defined HADOOP_CLASSPATH (
-    set HADOOP_CLASSPATH=%TEZ_CLASSPATH%
-  ) else (
-    set HADOOP_CLASSPATH=%HADOOP_CLASSPATH%;%TEZ_CLASSPATH%
-  )
-)
-
-@rem The maximum amount of heap to use, in MB. Default is 1000.
-@rem set HADOOP_HEAPSIZE=
-@rem set HADOOP_NAMENODE_INIT_HEAPSIZE=""
-
-@rem Extra Java runtime options.  Empty by default.
-@rem set HADOOP_OPTS=%HADOOP_OPTS% -Djava.net.preferIPv4Stack=true
-
-@rem Command specific options appended to HADOOP_OPTS when specified
-if not defined HADOOP_SECURITY_LOGGER (
-  set HADOOP_SECURITY_LOGGER=INFO,RFAS
-)
-if not defined HDFS_AUDIT_LOGGER (
-  set HDFS_AUDIT_LOGGER=INFO,NullAppender
-)
-
-set HADOOP_NAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_NAMENODE_OPTS%
-set HADOOP_DATANODE_OPTS=-Dhadoop.security.logger=ERROR,RFAS %HADOOP_DATANODE_OPTS%
-set HADOOP_SECONDARYNAMENODE_OPTS=-Dhadoop.security.logger=%HADOOP_SECURITY_LOGGER% -Dhdfs.audit.logger=%HDFS_AUDIT_LOGGER% %HADOOP_SECONDARYNAMENODE_OPTS%
-
-@rem The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-set HADOOP_CLIENT_OPTS=-Xmx512m %HADOOP_CLIENT_OPTS%
-@rem set HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData %HADOOP_JAVA_PLATFORM_OPTS%"
-
-@rem On secure datanodes, user to run the datanode as after dropping privileges
-set HADOOP_SECURE_DN_USER=%HADOOP_SECURE_DN_USER%
-
-@rem Where log files are stored.  %HADOOP_HOME%/logs by default.
-@rem set HADOOP_LOG_DIR=%HADOOP_LOG_DIR%\%USERNAME%
-
-@rem Where log files are stored in the secure data environment.
-set HADOOP_SECURE_DN_LOG_DIR=%HADOOP_LOG_DIR%\%HADOOP_HDFS_USER%
-
-@rem The directory where pid files are stored. /tmp by default.
-@rem NOTE: this should be set to a directory that can only be written to by 
-@rem       the user that will run the hadoop daemons.  Otherwise there is the
-@rem       potential for a symlink attack.
-set HADOOP_PID_DIR=%HADOOP_PID_DIR%
-set HADOOP_SECURE_DN_PID_DIR=%HADOOP_PID_DIR%
-
-@rem A string representing this instance of hadoop. %USERNAME% by default.
-set HADOOP_IDENT_STRING=%USERNAME%

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.sh b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.sh
deleted file mode 100644
index 95511ed..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-env.sh
+++ /dev/null
@@ -1,110 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Set Hadoop-specific environment variables here.
-
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
-
-# The java implementation to use.
-export JAVA_HOME=/etc/alternatives/java_sdk
-
-# The jsvc implementation to use. Jsvc is required to run secure datanodes
-# that bind to privileged ports to provide authentication of data transfer
-# protocol.  Jsvc is not required if SASL is configured for authentication of
-# data transfer protocol using non-privileged ports.
-#export JSVC_HOME=${JSVC_HOME}
-
-#export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
-
-# Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
-#for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
-#  if [ "$HADOOP_CLASSPATH" ]; then
-#    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
-#  else
-#    export HADOOP_CLASSPATH=$f
-#  fi
-#done
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-#export HADOOP_HEAPSIZE=
-#export HADOOP_NAMENODE_INIT_HEAPSIZE=""
-
-# Setup environment variable for docker image
-if [ -f /etc/profile.d/hadoop.sh ]; then
-  . /etc/profile.d/hadoop.sh
-fi
-
-if [ -z "${NAMENODE}" ]; then
-  echo "environment variable NAMENODE is not set!"
-  exit 1
-fi
-
-# Extra Java runtime options.  Empty by default.
-export HADOOP_OPTS="-Dhdfs.namenode=${NAMENODE}"
-#export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
-
-# Command specific options appended to HADOOP_OPTS when specified
-#export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
-#export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
-
-#export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
-
-#export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
-#export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"
-
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-#export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
-#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
-
-# On secure datanodes, user to run the datanode as after dropping privileges.
-# This **MUST** be uncommented to enable secure HDFS if using privileged ports
-# to provide authentication of data transfer protocol.  This **MUST NOT** be
-# defined if SASL is configured for authentication of data transfer protocol
-# using non-privileged ports.
-#export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
-
-# Where log files are stored.  $HADOOP_HOME/logs by default.
-export HADOOP_LOG_DIR=/var/log/hadoop
-export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
-
-# Where log files are stored in the secure data environment.
-#export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
-
-###
-# HDFS Mover specific parameters
-###
-# Specify the JVM options to be used when starting the HDFS Mover.
-# These options will be appended to the options specified as HADOOP_OPTS
-# and therefore may override any similar flags set in HADOOP_OPTS
-#
-# export HADOOP_MOVER_OPTS=""
-
-###
-# Advanced Users Only!
-###
-
-# The directory where pid files are stored. /tmp by default.
-# NOTE: this should be set to a directory that can only be written to by
-#       the user that will run the hadoop daemons.  Otherwise there is the
-#       potential for a symlink attack.
-#export HADOOP_PID_DIR=${HADOOP_PID_DIR}
-#export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
-
-# A string representing this instance of hadoop. $USER by default.
-#export HADOOP_IDENT_STRING=$USER
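
The hadoop-env.sh removed above aborts unless NAMENODE is set and folds it into HADOOP_OPTS as -Dhdfs.namenode. A minimal sketch of how a container entrypoint might satisfy that check before starting a daemon; the hostname and install path are placeholders, not values taken from this repository:

    # Hypothetical entrypoint fragment for one of the hawq-test containers.
    export NAMENODE=centos6-namenode    # placeholder hostname
    "${HADOOP_HOME:-/usr/local/hadoop}/sbin/hadoop-daemon.sh" start datanode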

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-metrics.properties
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-metrics.properties b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-metrics.properties
deleted file mode 100644
index c1b2eb7..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-metrics.properties
+++ /dev/null
@@ -1,75 +0,0 @@
-# Configuration of the "dfs" context for null
-dfs.class=org.apache.hadoop.metrics.spi.NullContext
-
-# Configuration of the "dfs" context for file
-#dfs.class=org.apache.hadoop.metrics.file.FileContext
-#dfs.period=10
-#dfs.fileName=/tmp/dfsmetrics.log
-
-# Configuration of the "dfs" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-# dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-# dfs.period=10
-# dfs.servers=localhost:8649
-
-
-# Configuration of the "mapred" context for null
-mapred.class=org.apache.hadoop.metrics.spi.NullContext
-
-# Configuration of the "mapred" context for file
-#mapred.class=org.apache.hadoop.metrics.file.FileContext
-#mapred.period=10
-#mapred.fileName=/tmp/mrmetrics.log
-
-# Configuration of the "mapred" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-# mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-# mapred.period=10
-# mapred.servers=localhost:8649
-
-
-# Configuration of the "jvm" context for null
-#jvm.class=org.apache.hadoop.metrics.spi.NullContext
-
-# Configuration of the "jvm" context for file
-#jvm.class=org.apache.hadoop.metrics.file.FileContext
-#jvm.period=10
-#jvm.fileName=/tmp/jvmmetrics.log
-
-# Configuration of the "jvm" context for ganglia
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-# jvm.period=10
-# jvm.servers=localhost:8649
-
-# Configuration of the "rpc" context for null
-rpc.class=org.apache.hadoop.metrics.spi.NullContext
-
-# Configuration of the "rpc" context for file
-#rpc.class=org.apache.hadoop.metrics.file.FileContext
-#rpc.period=10
-#rpc.fileName=/tmp/rpcmetrics.log
-
-# Configuration of the "rpc" context for ganglia
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-# rpc.period=10
-# rpc.servers=localhost:8649
-
-
-# Configuration of the "ugi" context for null
-ugi.class=org.apache.hadoop.metrics.spi.NullContext
-
-# Configuration of the "ugi" context for file
-#ugi.class=org.apache.hadoop.metrics.file.FileContext
-#ugi.period=10
-#ugi.fileName=/tmp/ugimetrics.log
-
-# Configuration of the "ugi" context for ganglia
-# ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-# ugi.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-# ugi.period=10
-# ugi.servers=localhost:8649
-
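
Every metrics-v1 context in the file above defaults to NullContext; the commented blocks show the file-based alternative. A sketch that appends the "dfs" FileContext settings from those comments, assuming the file sits under $HADOOP_CONF_DIR and that, per java.util.Properties semantics, the later entries override the NullContext defaults (an assumption worth verifying):

    {
      echo 'dfs.class=org.apache.hadoop.metrics.file.FileContext'
      echo 'dfs.period=10'
      echo 'dfs.fileName=/tmp/dfsmetrics.log'
    } >> "$HADOOP_CONF_DIR/hadoop-metrics.properties"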

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-metrics2.properties
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-metrics2.properties b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-metrics2.properties
deleted file mode 100644
index 0c09228..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-metrics2.properties
+++ /dev/null
@@ -1,68 +0,0 @@
-# syntax: [prefix].[source|sink].[instance].[options]
-# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
-
-*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
-# default sampling period, in seconds
-*.period=10
-
-# The namenode-metrics.out will contain metrics from all context
-#namenode.sink.file.filename=namenode-metrics.out
-# Specifying a special sampling period for namenode:
-#namenode.sink.*.period=8
-
-#datanode.sink.file.filename=datanode-metrics.out
-
-#resourcemanager.sink.file.filename=resourcemanager-metrics.out
-
-#nodemanager.sink.file.filename=nodemanager-metrics.out
-
-#mrappmaster.sink.file.filename=mrappmaster-metrics.out
-
-#jobhistoryserver.sink.file.filename=jobhistoryserver-metrics.out
-
-# the following example split metrics of different
-# context to different sinks (in this case files)
-#nodemanager.sink.file_jvm.class=org.apache.hadoop.metrics2.sink.FileSink
-#nodemanager.sink.file_jvm.context=jvm
-#nodemanager.sink.file_jvm.filename=nodemanager-jvm-metrics.out
-#nodemanager.sink.file_mapred.class=org.apache.hadoop.metrics2.sink.FileSink
-#nodemanager.sink.file_mapred.context=mapred
-#nodemanager.sink.file_mapred.filename=nodemanager-mapred-metrics.out
-
-#
-# Below are for sending metrics to Ganglia
-#
-# for Ganglia 3.0 support
-# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink30
-#
-# for Ganglia 3.1 support
-# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-
-# *.sink.ganglia.period=10
-
-# default for supportsparse is false
-# *.sink.ganglia.supportsparse=true
-
-#*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-#*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-# Tag values to use for the ganglia prefix. If not defined no tags are used.
-# If '*' all tags are used. If specifiying multiple tags separate them with 
-# commas. Note that the last segment of the property name is the context name.
-#
-#*.sink.ganglia.tagsForPrefix.jvm=ProcesName
-#*.sink.ganglia.tagsForPrefix.dfs=
-#*.sink.ganglia.tagsForPrefix.rpc=
-#*.sink.ganglia.tagsForPrefix.mapred=
-
-#namenode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
-
-#datanode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
-
-#resourcemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
-
-#nodemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
-
-#mrappmaster.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
-
-#jobhistoryserver.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
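
With the metrics2 file above, every daemon already loads FileSink with a 10-second default period, but a sink writes nothing until a filename is set for it. A minimal sketch that enables the NameNode file sink shown commented out in the original; uncommenting the line in place works just as well, appending merely keeps the sketch to one command:

    echo 'namenode.sink.file.filename=namenode-metrics.out' \
      >> "$HADOOP_CONF_DIR/hadoop-metrics2.properties"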

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-policy.xml b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-policy.xml
deleted file mode 100644
index 2bf5c02..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-test/conf/hadoop-policy.xml
+++ /dev/null
@@ -1,226 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
- 
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
- <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.user.mappings.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    users mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank. For
-    e.g. "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.ha.service.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HAService protocol used by HAAdmin to manage the
-      active and stand-by states of namenode.</description>
-  </property>
-
-  <property>
-    <name>security.zkfc.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for access to the ZK Failover Controller
-    </description>
-  </property>
-
-  <property>
-    <name>security.qjournal.service.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for QJournalProtocol, used by the NN to communicate with
-    JNs when using the QuorumJournalManager for edit logs.</description>
-  </property>
-
-  <property>
-    <name>security.mrhs.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HSClientProtocol, used by job clients to
-    communciate with the MR History Server job status etc. 
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <!-- YARN Protocols -->
-
-  <property>
-    <name>security.resourcetracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ResourceTrackerProtocol, used by the
-    ResourceManager and NodeManager to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.resourcemanager-administration.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ResourceManagerAdministrationProtocol, for admin commands. 
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.applicationclient.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ApplicationClientProtocol, used by the ResourceManager 
-    and applications submission clients to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.applicationmaster.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ApplicationMasterProtocol, used by the ResourceManager 
-    and ApplicationMasters to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.containermanagement.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ContainerManagementProtocol protocol, used by the NodeManager 
-    and ApplicationMasters to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.resourcelocalizer.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ResourceLocalizer protocol, used by the NodeManager 
-    and ResourceLocalizer to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.task.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for MRClientProtocol, used by job clients to
-    communciate with the MR ApplicationMaster to query job status etc. 
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.applicationhistory.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ApplicationHistoryProtocol, used by the timeline
-    server and the generic history service client to communicate with each other.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-</configuration>
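
All ACLs in the removed hadoop-policy.xml are wide open ("*"). If any of them were tightened, the new policy can be pushed to a running NameNode without a restart; a small sketch, assuming service-level authorization (hadoop.security.authorization) is enabled in core-site.xml:

    # Reload hadoop-policy.xml after editing an ACL such as
    # security.client.protocol.acl.
    hdfs dfsadmin -refreshServiceAcl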

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-test/conf/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/hdfs-site.xml b/contrib/hawq-docker/centos6-docker/hawq-test/conf/hdfs-site.xml
deleted file mode 100644
index f565658..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-test/conf/hdfs-site.xml
+++ /dev/null
@@ -1,100 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-	<property>
-		<name>dfs.name.dir</name>
-		<value>/tmp/hdfs/name</value>
-		<final>true</final>
-	</property>
-
-	<property>
-		<name>dfs.data.dir</name>
-		<value>/tmp/hdfs/data</value>
-		<final>true</final>
-	</property>
-
-	<property>
-		<name>dfs.permissions</name>
-		<value>true</value>
-	</property>
-
-	<property>
-		<name>dfs.support.append</name>
-		<value>true</value>
-	</property>
-
-	<property>
-		<name>dfs.block.local-path-access.user</name>
-		<value>${user.name}</value>
-	</property>
-
-	<property>
-		<name>dfs.replication</name>
-		<value>3</value>
-	</property>
-
-	<property>
-		<name>dfs.datanode.socket.write.timeout</name>
-		<value>0</value>
-		<description>
-			used for sockets to and from datanodes. It is 8 minutes by default. Some
-			users set this to 0, effectively disabling the write timeout.
-		</description>
-	</property>
-
-	<property>
-		<name>dfs.webhdfs.enabled</name>
-		<value>true</value>
-	</property>
-
-	<property>
-		<name>dfs.allow.truncate</name>
-		<value>true</value>
-	</property>
-
-	<property>
-		<name>dfs.namenode.fs-limits.min-block-size</name>
-		<value>1024</value>
-	</property>
-
-	<property>
-		<name>dfs.client.read.shortcircuit</name>
-		<value>true</value>
-	</property>
-
-	<property>
-		<name>dfs.domain.socket.path</name>
-		<value>/var/lib/hadoop-hdfs/dn_socket</value>
-	</property>
-
-	<property>
-		<name>dfs.block.access.token.enable</name>
-		<value>true</value>
-		<description>
-			If "true", access tokens are used as capabilities for accessing
-			datanodes.
-			If "false", no access tokens are checked on accessing datanodes.
-		</description>
-	</property>
-
-	<property>
-		<name>dfs.namenode.datanode.registration.ip-hostname-check</name>
-		<value>false</value>
-	</property>
-</configuration>
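
A few of the values above are easy to sanity-check from a node whose HADOOP_CONF_DIR contains this hdfs-site.xml; a minimal sketch using hdfs getconf, with the expected output following each command:

    hdfs getconf -confKey dfs.replication                        # 3
    hdfs getconf -confKey dfs.allow.truncate                     # true
    hdfs getconf -confKey dfs.namenode.fs-limits.min-block-size  # 1024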

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-acls.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-acls.xml b/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-acls.xml
deleted file mode 100644
index cba69f4..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-acls.xml
+++ /dev/null
@@ -1,135 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<configuration>
-
-  <!-- This file is hot-reloaded when it changes -->
-
-  <!-- KMS ACLs -->
-
-  <property>
-    <name>hadoop.kms.acl.CREATE</name>
-    <value>*</value>
-    <description>
-      ACL for create-key operations.
-      If the user is not in the GET ACL, the key material is not returned
-      as part of the response.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.acl.DELETE</name>
-    <value>*</value>
-    <description>
-      ACL for delete-key operations.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.acl.ROLLOVER</name>
-    <value>*</value>
-    <description>
-      ACL for rollover-key operations.
-      If the user is not in the GET ACL, the key material is not returned
-      as part of the response.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.acl.GET</name>
-    <value>*</value>
-    <description>
-      ACL for get-key-version and get-current-key operations.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.acl.GET_KEYS</name>
-    <value>*</value>
-    <description>
-      ACL for get-keys operations.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.acl.GET_METADATA</name>
-    <value>*</value>
-    <description>
-      ACL for get-key-metadata and get-keys-metadata operations.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.acl.SET_KEY_MATERIAL</name>
-    <value>*</value>
-    <description>
-      Complementary ACL for CREATE and ROLLOVER operations to allow the client
-      to provide the key material when creating or rolling a key.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.acl.GENERATE_EEK</name>
-    <value>*</value>
-    <description>
-      ACL for generateEncryptedKey CryptoExtension operations.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.acl.DECRYPT_EEK</name>
-    <value>*</value>
-    <description>
-      ACL for decryptEncryptedKey CryptoExtension operations.
-    </description>
-  </property>
-
-  <property>
-    <name>default.key.acl.MANAGEMENT</name>
-    <value>*</value>
-    <description>
-      default ACL for MANAGEMENT operations for all key acls that are not
-      explicitly defined.
-    </description>
-  </property>
-
-  <property>
-    <name>default.key.acl.GENERATE_EEK</name>
-    <value>*</value>
-    <description>
-      default ACL for GENERATE_EEK operations for all key acls that are not
-      explicitly defined.
-    </description>
-  </property>
-
-  <property>
-    <name>default.key.acl.DECRYPT_EEK</name>
-    <value>*</value>
-    <description>
-      default ACL for DECRYPT_EEK operations for all key acls that are not
-      explicitly defined.
-    </description>
-  </property>
-
-  <property>
-    <name>default.key.acl.READ</name>
-    <value>*</value>
-    <description>
-      default ACL for READ operations for all key acls that are not
-      explicitly defined.
-    </description>
-  </property>
-
-
-</configuration>
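
Every KMS ACL in the removed kms-acls.xml is "*", so any authenticated user may perform any key operation, and the file is hot-reloaded when it changes. A sketch of exercising the ROLLOVER ACL against such an instance; provider host, port and key name are placeholders:

    # Succeeds for any authenticated user under the shipped "*" ACLs;
    # narrowing hadoop.kms.acl.ROLLOVER would reject it with an
    # authorization error instead.
    hadoop key roll demo-key -provider kms://http@kms-host:16000/kms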

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-env.sh
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-env.sh b/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-env.sh
deleted file mode 100644
index 44dfe6a..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-env.sh
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/bin/bash
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License. See accompanying LICENSE file.
-#
-
-# Set kms specific environment variables here.
-
-# Settings for the Embedded Tomcat that runs KMS
-# Java System properties for KMS should be specified in this variable
-#
-# export CATALINA_OPTS=
-
-# KMS logs directory
-#
-# export KMS_LOG=${KMS_HOME}/logs
-
-# KMS temporary directory
-#
-# export KMS_TEMP=${KMS_HOME}/temp
-
-# The HTTP port used by KMS
-#
-# export KMS_HTTP_PORT=16000
-
-# The Admin port used by KMS
-#
-# export KMS_ADMIN_PORT=`expr ${KMS_HTTP_PORT} + 1`
-
-# The maximum number of Tomcat handler threads
-#
-# export KMS_MAX_THREADS=1000
-
-# The location of the SSL keystore if using SSL
-#
-# export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore
-
-# The password of the SSL keystore if using SSL
-#
-# export KMS_SSL_KEYSTORE_PASS=password
-
-# The full path to any native libraries that need to be loaded
-# (For eg. location of natively compiled tomcat Apache portable
-# runtime (APR) libraries
-#
-# export JAVA_LIBRARY_PATH=${HOME}/lib/native
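
Everything in the removed kms-env.sh is left at its default; the commented exports only document which knobs exist. A minimal sketch of overriding a couple of them before starting KMS, assuming a stock Hadoop 2.x layout where kms.sh lives under $HADOOP_HOME/sbin:

    export KMS_HTTP_PORT=16000            # same as the commented default
    export KMS_LOG=/var/log/hadoop-kms    # placeholder log directory
    "$HADOOP_HOME/sbin/kms.sh" start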

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-log4j.properties
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-log4j.properties b/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-log4j.properties
deleted file mode 100644
index 8e6d909..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-log4j.properties
+++ /dev/null
@@ -1,38 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License. See accompanying LICENSE file.
-#
-
-# If the Java System property 'kms.log.dir' is not defined at KMS start up time
-# Setup sets its value to '${kms.home}/logs'
-
-log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.kms.DatePattern='.'yyyy-MM-dd
-log4j.appender.kms.File=${kms.log.dir}/kms.log
-log4j.appender.kms.Append=true
-log4j.appender.kms.layout=org.apache.log4j.PatternLayout
-log4j.appender.kms.layout.ConversionPattern=%d{ISO8601} %-5p %c{1} - %m%n
-
-log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.kms-audit.DatePattern='.'yyyy-MM-dd
-log4j.appender.kms-audit.File=${kms.log.dir}/kms-audit.log
-log4j.appender.kms-audit.Append=true
-log4j.appender.kms-audit.layout=org.apache.log4j.PatternLayout
-log4j.appender.kms-audit.layout.ConversionPattern=%d{ISO8601} %m%n
-
-log4j.logger.kms-audit=INFO, kms-audit
-log4j.additivity.kms-audit=false
-
-log4j.rootLogger=ALL, kms
-log4j.logger.org.apache.hadoop.conf=ERROR
-log4j.logger.org.apache.hadoop=INFO
-log4j.logger.com.sun.jersey.server.wadl.generators.WadlGeneratorJAXBGrammarGenerator=OFF
\ No newline at end of file
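
The log4j configuration above routes regular KMS logging to kms.log and audit events to a separate kms-audit.log, both under kms.log.dir. A quick way to confirm audit records after a key operation; the directory is a placeholder for whatever kms.log.dir resolves to in a given deployment:

    tail -n 20 /var/log/hadoop-kms/kms-audit.log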

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/1cb29096/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-site.xml
----------------------------------------------------------------------
diff --git a/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-site.xml b/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-site.xml
deleted file mode 100644
index a810ca4..0000000
--- a/contrib/hawq-docker/centos6-docker/hawq-test/conf/kms-site.xml
+++ /dev/null
@@ -1,173 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<configuration>
-
-  <!-- KMS Backend KeyProvider -->
-
-  <property>
-    <name>hadoop.kms.key.provider.uri</name>
-    <value>jceks://file@/${user.home}/kms.keystore</value>
-    <description>
-      URI of the backing KeyProvider for the KMS.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.security.keystore.JavaKeyStoreProvider.password</name>
-    <value>none</value>
-    <description>
-      If using the JavaKeyStoreProvider, the password for the keystore file.
-    </description>
-  </property>
-
-  <!-- KMS Cache -->
-
-  <property>
-    <name>hadoop.kms.cache.enable</name>
-    <value>true</value>
-    <description>
-      Whether the KMS will act as a cache for the backing KeyProvider.
-      When the cache is enabled, operations like getKeyVersion, getMetadata,
-      and getCurrentKey will sometimes return cached data without consulting
-      the backing KeyProvider. Cached values are flushed when keys are deleted
-      or modified.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.cache.timeout.ms</name>
-    <value>600000</value>
-    <description>
-      Expiry time for the KMS key version and key metadata cache, in
-      milliseconds. This affects getKeyVersion and getMetadata.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.current.key.cache.timeout.ms</name>
-    <value>30000</value>
-    <description>
-      Expiry time for the KMS current key cache, in milliseconds. This
-      affects getCurrentKey operations.
-    </description>
-  </property>
-
-  <!-- KMS Audit -->
-
-  <property>
-    <name>hadoop.kms.audit.aggregation.window.ms</name>
-    <value>10000</value>
-    <description>
-      Duplicate audit log events within the aggregation window (specified in
-      ms) are quashed to reduce log traffic. A single message for aggregated
-      events is printed at the end of the window, along with a count of the
-      number of aggregated events.
-    </description>
-  </property>
-
-  <!-- KMS Security -->
-
-  <property>
-    <name>hadoop.kms.authentication.type</name>
-    <value>simple</value>
-    <description>
-      Authentication type for the KMS. Can be either &quot;simple&quot;
-      or &quot;kerberos&quot;.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.authentication.kerberos.keytab</name>
-    <value>${user.home}/kms.keytab</value>
-    <description>
-      Path to the keytab with credentials for the configured Kerberos principal.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.authentication.kerberos.principal</name>
-    <value>HTTP/localhost</value>
-    <description>
-      The Kerberos principal to use for the HTTP endpoint.
-      The principal must start with 'HTTP/' as per the Kerberos HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.authentication.kerberos.name.rules</name>
-    <value>DEFAULT</value>
-    <description>
-      Rules used to resolve Kerberos principal names.
-    </description>
-  </property>
-
-  <!-- Authentication cookie signature source -->
-
-  <property>
-    <name>hadoop.kms.authentication.signer.secret.provider</name>
-    <value>random</value>
-    <description>
-      Indicates how the secret to sign the authentication cookies will be
-      stored. Options are 'random' (default), 'string' and 'zookeeper'.
-      If using a setup with multiple KMS instances, 'zookeeper' should be used.
-    </description>
-  </property>
-
-  <!-- Configuration for 'zookeeper' authentication cookie signature source -->
-
-  <property>
-    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.path</name>
-    <value>/hadoop-kms/hadoop-auth-signature-secret</value>
-    <description>
-      The Zookeeper ZNode path where the KMS instances will store and retrieve
-      the secret from.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string</name>
-    <value>#HOSTNAME#:#PORT#,...</value>
-    <description>
-      The Zookeeper connection string, a list of hostnames and port comma
-      separated.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.auth.type</name>
-    <value>kerberos</value>
-    <description>
-      The Zookeeper authentication type, 'none' or 'sasl' (Kerberos).
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.keytab</name>
-    <value>/etc/hadoop/conf/kms.keytab</value>
-    <description>
-      The absolute path for the Kerberos keytab with the credentials to
-      connect to Zookeeper.
-    </description>
-  </property>
-
-  <property>
-    <name>hadoop.kms.authentication.signer.secret.provider.zookeeper.kerberos.principal</name>
-    <value>kms/#HOSTNAME#</value>
-    <description>
-      The Kerberos service principal used to connect to Zookeeper.
-    </description>
-  </property>
-
-</configuration>
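
The removed kms-site.xml backs the KMS with a JCEKS keystore in the service user's home directory and "simple" authentication, so key operations need no Kerberos setup. A minimal smoke test against such an instance; host, port and key name are placeholders:

    hadoop key create smoke-test-key -provider kms://http@kms-host:16000/kms
    hadoop key list -provider kms://http@kms-host:16000/kms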