Posted to commits@ambari.apache.org by jl...@apache.org on 2014/12/17 21:05:33 UTC

[01/37] ambari git commit: AMBARI-8695: Common Services: Refactor HDP-2.0.6 HDFS, ZOOKEEPER services (Jayush Luniya)

Repository: ambari
Updated Branches:
  refs/heads/trunk 75cdb28b8 -> 191232e29


http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_snamenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_snamenode.py
index 937687d..bcda8da 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_snamenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_snamenode.py
@@ -22,12 +22,16 @@ from ambari_commons import OSCheck
 from mock.mock import MagicMock, patch
 
 class TestSNamenode(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "HDFS/2.1.0.2.0/package"
+  STACK_VERSION = "2.0.6"
 
   def test_configure_default(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/snamenode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/snamenode.py",
                        classname = "SNameNode",
                        command = "configure",
-                       config_file="default.json"
+                       config_file = "default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
     self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
@@ -38,10 +42,12 @@ class TestSNamenode(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_start_default(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/snamenode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/snamenode.py",
                        classname = "SNameNode",
                        command = "start",
-                       config_file="default.json"
+                       config_file = "default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
     self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
@@ -73,10 +79,12 @@ class TestSNamenode(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_stop_default(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/snamenode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/snamenode.py",
                        classname = "SNameNode",
                        command = "stop",
-                       config_file="default.json"
+                       config_file = "default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/var/run/hadoop',
                               owner = 'hdfs',
@@ -105,10 +113,12 @@ class TestSNamenode(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_configure_secured(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/snamenode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/snamenode.py",
                        classname = "SNameNode",
                        command = "configure",
-                       config_file="secured.json"
+                       config_file = "secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
     self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
@@ -119,10 +129,12 @@ class TestSNamenode(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_start_secured(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/snamenode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/snamenode.py",
                        classname = "SNameNode",
                        command = "start",
-                       config_file="secured.json"
+                       config_file = "secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
     self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
@@ -154,10 +166,12 @@ class TestSNamenode(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_stop_secured(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/snamenode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/snamenode.py",
                        classname = "SNameNode",
                        command = "stop",
-                       config_file="secured.json"
+                       config_file = "secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/var/run/hadoop',
                               owner = 'hdfs',

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py
index 3a0c744..c358e9e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py
@@ -22,12 +22,16 @@ from ambari_commons import OSCheck
 from mock.mock import MagicMock, patch
 
 class TestZkfc(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "HDFS/2.1.0.2.0/package"
+  STACK_VERSION = "2.0.6"
 
   def test_start_default(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/zkfc_slave.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
                        classname = "ZkfcSlave",
                        command = "start",
-                       config_file="ha_default.json"
+                       config_file = "ha_default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/etc/security/limits.d',
                               owner = 'root',
@@ -84,10 +88,12 @@ class TestZkfc(RMFTestCase):
 
 
   def test_stop_default(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/zkfc_slave.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
                        classname = "ZkfcSlave",
                        command = "stop",
-                       config_file="ha_default.json"
+                       config_file = "ha_default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
                               owner = 'hdfs',
@@ -111,10 +117,12 @@ class TestZkfc(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_start_secured(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/zkfc_slave.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
                        classname = "ZkfcSlave",
                        command = "start",
-                       config_file="ha_secured.json"
+                       config_file = "ha_secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/etc/security/limits.d',
                               owner = 'root',
@@ -170,10 +178,12 @@ class TestZkfc(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_stop_secured(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/zkfc_slave.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zkfc_slave.py",
                        classname = "ZkfcSlave",
                        command = "stop",
-                       config_file="ha_secured.json"
+                       config_file = "ha_secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
                               owner = 'hdfs',

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py
index 372afe0..aa82924 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py
@@ -22,12 +22,16 @@ from stacks.utils.RMFTestCase import *
 
 @patch("os.path.exists", new = MagicMock(return_value=True))
 class TestZookeeperClient(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "ZOOKEEPER/3.4.5.2.0/package"
+  STACK_VERSION = "2.0.6"
 
   def test_configure_default(self):
-    self.executeScript("2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_client.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_client.py",
                        classname = "ZookeeperClient",
                        command = "configure",
-                       config_file="default.json"
+                       config_file = "default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assertResourceCalled('Directory', '/etc/zookeeper/conf',
@@ -80,11 +84,12 @@ class TestZookeeperClient(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_configure_secured(self):
-
-    self.executeScript("2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_client.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_client.py",
                        classname = "ZookeeperClient",
                        command = "configure",
-                       config_file="secured.json"
+                       config_file = "secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assertResourceCalled('Directory', '/etc/zookeeper/conf',

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
index c626fb9..ff3b413 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
@@ -22,21 +22,27 @@ from stacks.utils.RMFTestCase import *
 
 @patch("os.path.exists", new = MagicMock(return_value=True))
 class TestZookeeperServer(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "ZOOKEEPER/3.4.5.2.0/package"
+  STACK_VERSION = "2.0.6"
 
   def test_configure_default(self):
-    self.executeScript("2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_server.py",
-                   classname = "ZookeeperServer",
-                   command = "configure",
-                   config_file="default.json"
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_server.py",
+                       classname = "ZookeeperServer",
+                       command = "configure",
+                       config_file = "default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
     self.assertNoMoreResources()
 
   def test_start_default(self):
-    self.executeScript("2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_server.py",
-                   classname = "ZookeeperServer",
-                   command = "start",
-                   config_file="default.json"
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_server.py",
+                       classname = "ZookeeperServer",
+                       command = "start",
+                       config_file = "default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assert_configure_default()
@@ -47,10 +53,12 @@ class TestZookeeperServer(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_stop_default(self):
-    self.executeScript("2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_server.py",
-                  classname = "ZookeeperServer",
-                  command = "stop",
-                  config_file="default.json"
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_server.py",
+                       classname = "ZookeeperServer",
+                       command = "stop",
+                       config_file = "default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assertResourceCalled('Execute', 'source /etc/zookeeper/conf/zookeeper-env.sh ; env ZOOCFGDIR=/etc/zookeeper/conf ZOOCFG=zoo.cfg /usr/lib/zookeeper/bin/zkServer.sh stop',
@@ -60,19 +68,23 @@ class TestZookeeperServer(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_configure_secured(self):
-    self.executeScript("2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_server.py",
-                  classname = "ZookeeperServer",
-                  command = "configure",
-                  config_file="secured.json"
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_server.py",
+                       classname = "ZookeeperServer",
+                       command = "configure",
+                       config_file = "secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
     self.assertNoMoreResources()
 
   def test_start_secured(self):
-    self.executeScript("2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_server.py",
-                  classname = "ZookeeperServer",
-                  command = "start",
-                  config_file="secured.json"
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_server.py",
+                       classname = "ZookeeperServer",
+                       command = "start",
+                       config_file = "secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assert_configure_secured()
@@ -85,10 +97,12 @@ class TestZookeeperServer(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_stop_secured(self):
-    self.executeScript("2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_server.py",
-                  classname = "ZookeeperServer",
-                  command = "stop",
-                  config_file="secured.json"
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/zookeeper_server.py",
+                       classname = "ZookeeperServer",
+                       command = "stop",
+                       config_file = "secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assertResourceCalled('Execute', 'source /etc/zookeeper/conf/zookeeper-env.sh ; env ZOOCFGDIR=/etc/zookeeper/conf ZOOCFG=zoo.cfg /usr/lib/zookeeper/bin/zkServer.sh stop',

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_service_check.py
index 6e1f6eb..ade731f 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_service_check.py
@@ -21,13 +21,16 @@ from mock.mock import MagicMock, call, patch
 from stacks.utils.RMFTestCase import *
 
 class TestServiceCheck(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "ZOOKEEPER/3.4.5.2.0/package"
+  STACK_VERSION = "2.0.6"
 
   def test_service_check_default(self):
-
-    self.executeScript("2.0.6/services/ZOOKEEPER/package/scripts/service_check.py",
-                       classname="ZookeeperServiceCheck",
-                       command="service_check",
-                       config_file="default.json"
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
+                       classname = "ZookeeperServiceCheck",
+                       command = "service_check",
+                       config_file = "default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/tmp/zkSmoke.sh',
                        content = StaticFile('zkSmoke.sh'),
@@ -42,11 +45,12 @@ class TestServiceCheck(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_service_check_secured(self):
-
-    self.executeScript("2.0.6/services/ZOOKEEPER/package/scripts/service_check.py",
-                       classname="ZookeeperServiceCheck",
-                       command="service_check",
-                       config_file="secured.json"
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
+                       classname = "ZookeeperServiceCheck",
+                       command = "service_check",
+                       config_file = "secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/tmp/zkSmoke.sh',
                        content = StaticFile('zkSmoke.sh'),

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
index b0920ca..63bcdb8 100644
--- a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
+++ b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
@@ -37,6 +37,8 @@ with patch("platform.linux_distribution", return_value = ('Suse','11','Final')):
 PATH_TO_STACKS = "main/resources/stacks/HDP"
 PATH_TO_STACK_TESTS = "test/python/stacks/"
 
+PATH_TO_COMMON_SERVICES = "main/resources/common-services"
+
 PATH_TO_CUSTOM_ACTIONS = "main/resources/custom_actions"
 PATH_TO_CUSTOM_ACTION_TESTS = "test/python/custom_actions"
 MAX_SHOWN_DICT_LEN = 10
@@ -50,10 +52,14 @@ class RMFTestCase(TestCase):
   # (default) build all paths to test custom action scripts
   TARGET_CUSTOM_ACTIONS = 'TARGET_CUSTOM_ACTIONS'
 
+  # build all paths to test common services scripts
+  TARGET_COMMON_SERVICES = 'TARGET_COMMON_SERVICES'
+
   def executeScript(self, path, classname=None, command=None, config_file=None,
                     config_dict=None,
                     # common mocks for all the scripts
                     config_overrides = None,
+                    hdp_stack_version = None,
                     shell_mock_value = (0, "OK."), 
                     os_type=('Suse','11','Final'),
                     kinit_path_local="/usr/bin/kinit",
@@ -64,14 +70,17 @@ class RMFTestCase(TestCase):
     src_dir = RMFTestCase._getSrcFolder()
     if target == self.TARGET_STACKS:
       stack_version = norm_path.split(os.sep)[0]
-      stacks_path = os.path.join(src_dir, PATH_TO_STACKS)
+      base_path = os.path.join(src_dir, PATH_TO_STACKS)
       configs_path = os.path.join(src_dir, PATH_TO_STACK_TESTS, stack_version, "configs")
     elif target == self.TARGET_CUSTOM_ACTIONS:
-      stacks_path = os.path.join(src_dir, PATH_TO_CUSTOM_ACTIONS)
+      base_path = os.path.join(src_dir, PATH_TO_CUSTOM_ACTIONS)
       configs_path = os.path.join(src_dir, PATH_TO_CUSTOM_ACTION_TESTS, "configs")
+    elif target == self.TARGET_COMMON_SERVICES:
+      base_path = os.path.join(src_dir, PATH_TO_COMMON_SERVICES)
+      configs_path = os.path.join(src_dir, PATH_TO_STACK_TESTS, hdp_stack_version, "configs")
     else:
       raise RuntimeError("Wrong target value %s", target)
-    script_path = os.path.join(stacks_path, norm_path)
+    script_path = os.path.join(base_path, norm_path)
     if config_file is not None and config_dict is None:
       config_file_path = os.path.join(configs_path, config_file)
       try:
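
For orientation, here is a minimal standalone sketch (not the actual method) of the path
resolution that executeScript performs after this change. The constants and branch logic
mirror the diff above; the commented call at the bottom mirrors the refactored SNameNode
test, and the fixed-up RuntimeError formatting is my own touch, not part of the commit.

    import os

    # Constants as defined in RMFTestCase.py above.
    PATH_TO_STACKS = "main/resources/stacks/HDP"
    PATH_TO_STACK_TESTS = "test/python/stacks/"
    PATH_TO_COMMON_SERVICES = "main/resources/common-services"
    PATH_TO_CUSTOM_ACTIONS = "main/resources/custom_actions"
    PATH_TO_CUSTOM_ACTION_TESTS = "test/python/custom_actions"

    def resolve_paths(src_dir, norm_path, target, hdp_stack_version=None):
        if target == 'TARGET_STACKS':
            # Stack tests keep the stack version as the first path component.
            stack_version = norm_path.split(os.sep)[0]
            base_path = os.path.join(src_dir, PATH_TO_STACKS)
            configs_path = os.path.join(src_dir, PATH_TO_STACK_TESTS,
                                        stack_version, "configs")
        elif target == 'TARGET_CUSTOM_ACTIONS':
            base_path = os.path.join(src_dir, PATH_TO_CUSTOM_ACTIONS)
            configs_path = os.path.join(src_dir, PATH_TO_CUSTOM_ACTION_TESTS,
                                        "configs")
        elif target == 'TARGET_COMMON_SERVICES':
            # Scripts now live under common-services, but the test configs
            # still come from the per-stack test tree, which is why the
            # refactored tests pass hdp_stack_version explicitly.
            base_path = os.path.join(src_dir, PATH_TO_COMMON_SERVICES)
            configs_path = os.path.join(src_dir, PATH_TO_STACK_TESTS,
                                        hdp_stack_version, "configs")
        else:
            raise RuntimeError("Wrong target value %s" % target)
        return os.path.join(base_path, norm_path), configs_path

    # e.g. the refactored common-services SNameNode test resolves as:
    # resolve_paths(src, "HDFS/2.1.0.2.0/package/scripts/snamenode.py",
    #               'TARGET_COMMON_SERVICES', hdp_stack_version="2.0.6")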


[32/37] ambari git commit: AMBARI-8746: Common Services: Refactor HDP 2.0.6 HIVE, OOZIE services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/etc/hive-schema-0.12.0.oracle.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/etc/hive-schema-0.12.0.oracle.sql b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/etc/hive-schema-0.12.0.oracle.sql
new file mode 100644
index 0000000..812b897
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/etc/hive-schema-0.12.0.oracle.sql
@@ -0,0 +1,718 @@
+-- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE SEQUENCE_TABLE
+(
+   SEQUENCE_NAME VARCHAR2(255) NOT NULL,
+   NEXT_VAL NUMBER NOT NULL
+);
+
+ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME);
+
+-- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
+-- This table is required if datanucleus.autoStartMechanism=SchemaTable
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE NUCLEUS_TABLES
+(
+   CLASS_NAME VARCHAR2(128) NOT NULL,
+   TABLE_NAME VARCHAR2(128) NOT NULL,
+   TYPE VARCHAR2(4) NOT NULL,
+   OWNER VARCHAR2(2) NOT NULL,
+   VERSION VARCHAR2(20) NOT NULL,
+   INTERFACE_NAME VARCHAR2(255) NULL
+);
+
+ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME);
+
+-- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+CREATE TABLE PART_COL_PRIVS
+(
+    PART_COLUMN_GRANT_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PART_ID NUMBER NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    PART_COL_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
+
+-- Table CDS.
+CREATE TABLE CDS
+(
+    CD_ID NUMBER NOT NULL
+);
+
+ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+
+-- Table COLUMNS_V2 for join relationship
+CREATE TABLE COLUMNS_V2
+(
+    CD_ID NUMBER NOT NULL,
+    "COMMENT" VARCHAR2(256) NULL,
+    "COLUMN_NAME" VARCHAR2(128) NOT NULL,
+    TYPE_NAME VARCHAR2(4000) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
+
+-- Table PARTITION_KEY_VALS for join relationship
+CREATE TABLE PARTITION_KEY_VALS
+(
+    PART_ID NUMBER NOT NULL,
+    PART_KEY_VAL VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
+
+-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE TABLE DBS
+(
+    DB_ID NUMBER NOT NULL,
+    "DESC" VARCHAR2(4000) NULL,
+    DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
+    "NAME" VARCHAR2(128) NULL
+);
+
+ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+
+-- Table PARTITION_PARAMS for join relationship
+CREATE TABLE PARTITION_PARAMS
+(
+    PART_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
+
+-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+CREATE TABLE SERDES
+(
+    SERDE_ID NUMBER NOT NULL,
+    "NAME" VARCHAR2(128) NULL,
+    SLIB VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+
+-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+CREATE TABLE TYPES
+(
+    TYPES_ID NUMBER NOT NULL,
+    TYPE_NAME VARCHAR2(128) NULL,
+    TYPE1 VARCHAR2(767) NULL,
+    TYPE2 VARCHAR2(767) NULL
+);
+
+ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+
+-- Table PARTITION_KEYS for join relationship
+CREATE TABLE PARTITION_KEYS
+(
+    TBL_ID NUMBER NOT NULL,
+    PKEY_COMMENT VARCHAR2(4000) NULL,
+    PKEY_NAME VARCHAR2(128) NOT NULL,
+    PKEY_TYPE VARCHAR2(767) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
+
+-- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE TABLE ROLES
+(
+    ROLE_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    OWNER_NAME VARCHAR2(128) NULL,
+    ROLE_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
+
+-- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
+CREATE TABLE PARTITIONS
+(
+    PART_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    PART_NAME VARCHAR2(767) NULL,
+    SD_ID NUMBER NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
+
+-- Table INDEX_PARAMS for join relationship
+CREATE TABLE INDEX_PARAMS
+(
+    INDEX_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
+
+-- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+CREATE TABLE TBL_COL_PRIVS
+(
+    TBL_COLUMN_GRANT_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    TBL_COL_PRIV VARCHAR2(128) NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
+
+-- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
+CREATE TABLE IDXS
+(
+    INDEX_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DEFERRED_REBUILD NUMBER(1) NOT NULL CHECK (DEFERRED_REBUILD IN (1,0)),
+    INDEX_HANDLER_CLASS VARCHAR2(4000) NULL,
+    INDEX_NAME VARCHAR2(128) NULL,
+    INDEX_TBL_ID NUMBER NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    ORIG_TBL_ID NUMBER NULL,
+    SD_ID NUMBER NULL
+);
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
+
+-- Table BUCKETING_COLS for join relationship
+CREATE TABLE BUCKETING_COLS
+(
+    SD_ID NUMBER NOT NULL,
+    BUCKET_COL_NAME VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TYPE_FIELDS for join relationship
+CREATE TABLE TYPE_FIELDS
+(
+    TYPE_NAME NUMBER NOT NULL,
+    "COMMENT" VARCHAR2(256) NULL,
+    FIELD_NAME VARCHAR2(128) NOT NULL,
+    FIELD_TYPE VARCHAR2(767) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
+
+-- Table SD_PARAMS for join relationship
+CREATE TABLE SD_PARAMS
+(
+    SD_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
+
+-- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE TABLE GLOBAL_PRIVS
+(
+    USER_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    USER_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
+
+-- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+CREATE TABLE SDS
+(
+    SD_ID NUMBER NOT NULL,
+    CD_ID NUMBER NULL,
+    INPUT_FORMAT VARCHAR2(4000) NULL,
+    IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)),
+    LOCATION VARCHAR2(4000) NULL,
+    NUM_BUCKETS NUMBER (10) NOT NULL,
+    OUTPUT_FORMAT VARCHAR2(4000) NULL,
+    SERDE_ID NUMBER NULL,
+    IS_STOREDASSUBDIRECTORIES NUMBER(1) NOT NULL CHECK (IS_STOREDASSUBDIRECTORIES IN (1,0))
+);
+
+ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
+
+-- Table TABLE_PARAMS for join relationship
+CREATE TABLE TABLE_PARAMS
+(
+    TBL_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
+
+-- Table SORT_COLS for join relationship
+CREATE TABLE SORT_COLS
+(
+    SD_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    "ORDER" NUMBER (10) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+CREATE TABLE TBL_PRIVS
+(
+    TBL_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    TBL_PRIV VARCHAR2(128) NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
+
+-- Table DATABASE_PARAMS for join relationship
+CREATE TABLE DATABASE_PARAMS
+(
+    DB_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(180) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
+
+-- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
+CREATE TABLE ROLE_MAP
+(
+    ROLE_GRANT_ID NUMBER NOT NULL,
+    ADD_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    ROLE_ID NUMBER NULL
+);
+
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
+
+-- Table SERDE_PARAMS for join relationship
+CREATE TABLE SERDE_PARAMS
+(
+    SERDE_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
+
+-- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+CREATE TABLE PART_PRIVS
+(
+    PART_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PART_ID NUMBER NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    PART_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
+
+-- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+CREATE TABLE DB_PRIVS
+(
+    DB_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DB_ID NUMBER NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    DB_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
+
+-- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
+CREATE TABLE TBLS
+(
+    TBL_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DB_ID NUMBER NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    OWNER VARCHAR2(767) NULL,
+    RETENTION NUMBER (10) NOT NULL,
+    SD_ID NUMBER NULL,
+    TBL_NAME VARCHAR2(128) NULL,
+    TBL_TYPE VARCHAR2(128) NULL,
+    VIEW_EXPANDED_TEXT CLOB NULL,
+    VIEW_ORIGINAL_TEXT CLOB NULL
+);
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
+
+-- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE TABLE PARTITION_EVENTS
+(
+    PART_NAME_ID NUMBER NOT NULL,
+    DB_NAME VARCHAR2(128) NULL,
+    EVENT_TIME NUMBER NOT NULL,
+    EVENT_TYPE NUMBER (10) NOT NULL,
+    PARTITION_NAME VARCHAR2(767) NULL,
+    TBL_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
+
+-- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
+CREATE TABLE SKEWED_STRING_LIST
+(
+    STRING_LIST_ID NUMBER NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
+
+CREATE TABLE SKEWED_STRING_LIST_VALUES
+(
+    STRING_LIST_ID NUMBER NOT NULL,
+    "STRING_LIST_VALUE" VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_COL_NAMES
+(
+    SD_ID NUMBER NOT NULL,
+    "SKEWED_COL_NAME" VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
+(
+    SD_ID NUMBER NOT NULL,
+    STRING_LIST_ID_KID NUMBER NOT NULL,
+    "LOCATION" VARCHAR2(4000) NULL
+);
+
+CREATE TABLE MASTER_KEYS
+(
+    KEY_ID NUMBER (10) NOT NULL,
+    MASTER_KEY VARCHAR2(767) NULL
+);
+
+CREATE TABLE DELEGATION_TOKENS
+(
+    TOKEN_IDENT VARCHAR2(767) NOT NULL,
+    TOKEN VARCHAR2(767) NULL
+);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_VALUES
+(
+    SD_ID_OID NUMBER NOT NULL,
+    STRING_LIST_ID_EID NUMBER NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+-- column statistics
+
+CREATE TABLE TAB_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL, 
+ COLUMN_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ TBL_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+CREATE TABLE VERSION (
+  VER_ID NUMBER NOT NULL,
+  SCHEMA_VERSION VARCHAR(127) NOT NULL,
+  VERSION_COMMENT VARCHAR(255)
+);
+ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID);
+
+CREATE TABLE PART_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL,
+ PARTITION_NAME VARCHAR2(767) NOT NULL,
+ COLUMN_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ PART_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
+
+CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
+
+-- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
+
+CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table COLUMNS_V2
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
+
+
+-- Constraints for table PARTITION_KEY_VALS
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
+
+
+-- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME");
+
+
+-- Constraints for table PARTITION_PARAMS
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
+
+
+-- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+
+-- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
+CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME);
+
+
+-- Constraints for table PARTITION_KEYS
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
+
+
+-- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
+
+
+-- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
+
+CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
+
+CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
+
+
+-- Constraints for table INDEX_PARAMS
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
+
+
+-- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
+
+
+-- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
+
+CREATE INDEX IDXS_N50 ON IDXS (INDEX_TBL_ID);
+
+CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
+
+CREATE INDEX IDXS_N49 ON IDXS (ORIG_TBL_ID);
+
+
+-- Constraints for table BUCKETING_COLS
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
+
+
+-- Constraints for table TYPE_FIELDS
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
+
+
+-- Constraints for table SD_PARAMS
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
+
+
+-- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
+CREATE INDEX SDS_N50 ON SDS (CD_ID);
+
+
+-- Constraints for table TABLE_PARAMS
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
+
+
+-- Constraints for table SORT_COLS
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
+
+
+-- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
+
+CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table DATABASE_PARAMS
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
+
+
+-- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
+
+CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SERDE_PARAMS
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
+
+
+-- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
+
+
+-- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
+
+
+-- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
+
+CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
+
+CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
+
+
+-- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
+
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.12.0', 'Hive release version 0.12.0');
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/etc/hive-schema-0.12.0.postgres.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/etc/hive-schema-0.12.0.postgres.sql b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/etc/hive-schema-0.12.0.postgres.sql
new file mode 100644
index 0000000..bc6486b
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/etc/hive-schema-0.12.0.postgres.sql
@@ -0,0 +1,1406 @@
+--
+-- PostgreSQL database dump
+--
+
+SET statement_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = off;
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+SET escape_string_warning = off;
+
+SET search_path = public, pg_catalog;
+
+SET default_tablespace = '';
+
+SET default_with_oids = false;
+
+--
+-- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "BUCKETING_COLS" (
+    "SD_ID" bigint NOT NULL,
+    "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "CDS" (
+    "CD_ID" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_OLD; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_OLD" (
+    "SD_ID" bigint NOT NULL,
+    "COMMENT" character varying(256) DEFAULT NULL::character varying,
+    "COLUMN_NAME" character varying(128) NOT NULL,
+    "TYPE_NAME" character varying(4000) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_V2" (
+    "CD_ID" bigint NOT NULL,
+    "COMMENT" character varying(4000),
+    "COLUMN_NAME" character varying(128) NOT NULL,
+    "TYPE_NAME" character varying(4000),
+    "INTEGER_IDX" integer NOT NULL
+);
+
+
+--
+-- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DATABASE_PARAMS" (
+    "DB_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(180) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DBS" (
+    "DB_ID" bigint NOT NULL,
+    "DESC" character varying(4000) DEFAULT NULL::character varying,
+    "DB_LOCATION_URI" character varying(4000) NOT NULL,
+    "NAME" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DB_PRIVS" (
+    "DB_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DB_ID" bigint,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "DB_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "GLOBAL_PRIVS" (
+    "USER_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "USER_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "IDXS" (
+    "INDEX_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DEFERRED_REBUILD" boolean NOT NULL,
+    "INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character varying,
+    "INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
+    "INDEX_TBL_ID" bigint,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "ORIG_TBL_ID" bigint,
+    "SD_ID" bigint
+);
+
+
+--
+-- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "INDEX_PARAMS" (
+    "INDEX_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "NUCLEUS_TABLES" (
+    "CLASS_NAME" character varying(128) NOT NULL,
+    "TABLE_NAME" character varying(128) NOT NULL,
+    "TYPE" character varying(4) NOT NULL,
+    "OWNER" character varying(2) NOT NULL,
+    "VERSION" character varying(20) NOT NULL,
+    "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITIONS" (
+    "PART_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "PART_NAME" character varying(767) DEFAULT NULL::character varying,
+    "SD_ID" bigint,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_EVENTS" (
+    "PART_NAME_ID" bigint NOT NULL,
+    "DB_NAME" character varying(128),
+    "EVENT_TIME" bigint NOT NULL,
+    "EVENT_TYPE" integer NOT NULL,
+    "PARTITION_NAME" character varying(767),
+    "TBL_NAME" character varying(128)
+);
+
+
+--
+-- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEYS" (
+    "TBL_ID" bigint NOT NULL,
+    "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying,
+    "PKEY_NAME" character varying(128) NOT NULL,
+    "PKEY_TYPE" character varying(767) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEY_VALS" (
+    "PART_ID" bigint NOT NULL,
+    "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_PARAMS" (
+    "PART_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_PRIVS" (
+    "PART_COLUMN_GRANT_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_ID" bigint,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_PRIVS" (
+    "PART_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_ID" bigint,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLES" (
+    "ROLE_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+    "ROLE_NAME" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLE_MAP" (
+    "ROLE_GRANT_ID" bigint NOT NULL,
+    "ADD_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "ROLE_ID" bigint
+);
+
+
+--
+-- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SDS" (
+    "SD_ID" bigint NOT NULL,
+    "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+    "IS_COMPRESSED" boolean NOT NULL,
+    "LOCATION" character varying(4000) DEFAULT NULL::character varying,
+    "NUM_BUCKETS" bigint NOT NULL,
+    "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+    "SERDE_ID" bigint,
+    "CD_ID" bigint,
+    "IS_STOREDASSUBDIRECTORIES" boolean NOT NULL
+);
+
+
+--
+-- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SD_PARAMS" (
+    "SD_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SEQUENCE_TABLE" (
+    "SEQUENCE_NAME" character varying(255) NOT NULL,
+    "NEXT_VAL" bigint NOT NULL
+);
+
+
+--
+-- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDES" (
+    "SERDE_ID" bigint NOT NULL,
+    "NAME" character varying(128) DEFAULT NULL::character varying,
+    "SLIB" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDE_PARAMS" (
+    "SERDE_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SORT_COLS" (
+    "SD_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "ORDER" bigint NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TABLE_PARAMS" (
+    "TBL_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBLS" (
+    "TBL_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DB_ID" bigint,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "OWNER" character varying(767) DEFAULT NULL::character varying,
+    "RETENTION" bigint NOT NULL,
+    "SD_ID" bigint,
+    "TBL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "VIEW_EXPANDED_TEXT" text,
+    "VIEW_ORIGINAL_TEXT" text
+);
+
+
+--
+-- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_COL_PRIVS" (
+    "TBL_COLUMN_GRANT_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_PRIVS" (
+    "TBL_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "TBL_PRIV" character varying(128) DEFAULT NULL::character varying,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPES" (
+    "TYPES_ID" bigint NOT NULL,
+    "TYPE_NAME" character varying(128) DEFAULT NULL::character varying,
+    "TYPE1" character varying(767) DEFAULT NULL::character varying,
+    "TYPE2" character varying(767) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPE_FIELDS" (
+    "TYPE_NAME" bigint NOT NULL,
+    "COMMENT" character varying(256) DEFAULT NULL::character varying,
+    "FIELD_NAME" character varying(128) NOT NULL,
+    "FIELD_TYPE" character varying(767) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST" (
+    "STRING_LIST_ID" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST_VALUES" (
+    "STRING_LIST_ID" bigint NOT NULL,
+    "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_NAMES" (
+    "SD_ID" bigint NOT NULL,
+    "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" (
+    "SD_ID" bigint NOT NULL,
+    "STRING_LIST_ID_KID" bigint NOT NULL,
+    "LOCATION" character varying(4000) DEFAULT NULL::character varying
+);
+
+CREATE TABLE "SKEWED_VALUES" (
+    "SD_ID_OID" bigint NOT NULL,
+    "STRING_LIST_ID_EID" bigint NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: MASTER_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "MASTER_KEYS"
+(
+    "KEY_ID" SERIAL,
+    "MASTER_KEY" varchar(767) NULL,
+    PRIMARY KEY ("KEY_ID")
+);
+
+
+--
+-- Name: DELEGATION_TOKENS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DELEGATION_TOKENS"
+(
+    "TOKEN_IDENT" varchar(767) NOT NULL,
+    "TOKEN" varchar(767) NULL,
+    PRIMARY KEY ("TOKEN_IDENT")
+);
+
+
+--
+-- Name: TAB_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TAB_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "TBL_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Table structure for VERSION
+--
+CREATE TABLE "VERSION" (
+  "VER_ID" bigint,
+  "SCHEMA_VERSION" character varying(127) NOT NULL,
+  "VERSION_COMMENT" character varying(255) NOT NULL
+);
+
+--
+-- Name: PART_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "PART_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+    ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "CDS"
+    ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID");
+
+
+--
+-- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+    ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
+
+
+--
+-- Name: COLUMNS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "COLUMNS_OLD"
+    ADD CONSTRAINT "COLUMNS_pkey" PRIMARY KEY ("SD_ID", "COLUMN_NAME");
+
+
+--
+-- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+    ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY");
+
+
+--
+-- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+    ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID");
+
+
+--
+-- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+    ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+    ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID");
+
+
+--
+-- Name: IDXS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_pkey" PRIMARY KEY ("INDEX_ID");
+
+
+--
+-- Name: INDEX_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+    ADD CONSTRAINT "INDEX_PARAMS_pkey" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
+
+
+--
+-- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "NUCLEUS_TABLES"
+    ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME");
+
+
+--
+-- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID");
+
+
+--
+-- Name: PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_EVENTS"
+    ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID");
+
+
+--
+-- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+    ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
+
+
+--
+-- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+    ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
+
+
+--
+-- Name: PARTITION_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+    ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY");
+
+
+--
+-- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+    ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
+
+
+--
+-- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+    ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID");
+
+
+--
+-- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+    ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME");
+
+
+--
+-- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+    ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID");
+
+
+--
+-- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY ("ROLE_GRANT_ID");
+
+
+--
+-- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID");
+
+
+--
+-- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+    ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY");
+
+
+--
+-- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SEQUENCE_TABLE"
+    ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME");
+
+
+--
+-- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDES"
+    ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID");
+
+
+--
+-- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+    ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
+
+
+--
+-- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+    ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+    ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
+
+
+--
+-- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+    ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
+
+
+--
+-- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+    ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID");
+
+
+--
+-- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+    ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID");
+
+
+--
+-- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+    ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+    ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
+
+--
+-- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: UNIQUEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "UNIQUEINDEX" UNIQUE ("INDEX_NAME", "ORIG_TBL_ID");
+
+
+--
+-- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID");
+
+
+--
+-- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID");
+
+
+--
+-- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+    ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME");
+
+
+--
+-- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+    ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME");
+
+
+--
+-- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: COLUMNS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "COLUMNS_N49" ON "COLUMNS_OLD" USING btree ("SD_ID");
+
+
+--
+-- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID");
+
+
+--
+-- Name: IDXS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N49" ON "IDXS" USING btree ("ORIG_TBL_ID");
+
+
+--
+-- Name: IDXS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N50" ON "IDXS" USING btree ("INDEX_TBL_ID");
+
+
+--
+-- Name: IDXS_N51; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N51" ON "IDXS" USING btree ("SD_ID");
+
+
+--
+-- Name: INDEX_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "INDEX_PARAMS_N49" ON "INDEX_PARAMS" USING btree ("INDEX_ID");
+
+
+--
+-- Name: PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME");
+
+
+--
+-- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID");
+
+
+--
+-- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITION_KEY_VALS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID");
+
+
+--
+-- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID");
+
+
+--
+-- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID");
+
+
+--
+-- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME");
+
+--
+-- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID");
+
+--
+-- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
+
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+    ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+    ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: COLUMNS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "COLUMNS_OLD"
+    ADD CONSTRAINT "COLUMNS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+    ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+    ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_INDEX_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_INDEX_TBL_ID_fkey" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_ORIG_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_ORIG_TBL_ID_fkey" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: INDEX_PARAMS_INDEX_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+    ADD CONSTRAINT "INDEX_PARAMS_INDEX_ID_fkey" FOREIGN KEY ("INDEX_ID") REFERENCES "IDXS"("INDEX_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+    ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+    ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+    ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+    ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+    ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+    ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+    ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+    ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+    ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+    ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+    ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+    ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE;
+
+--
+-- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
+
+--
+-- Name: public; Type: ACL; Schema: -; Owner: hiveuser
+--
+
+REVOKE ALL ON SCHEMA public FROM PUBLIC;
+GRANT ALL ON SCHEMA public TO PUBLIC;
+
+
+INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '0.12.0', 'Hive release version 0.12.0');
+--
+-- PostgreSQL database dump complete
+--
+

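The dump above can be loaded and sanity-checked with psql; a minimal sketch, where the file, database, and role names are illustrative:

  psql -U hiveuser -d hive -f hive-schema-0.12.0.postgres.sql
  psql -U hiveuser -d hive -c 'SELECT "SCHEMA_VERSION" FROM "VERSION";'   # expect 0.12.0
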
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/addMysqlUser.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/addMysqlUser.sh b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/addMysqlUser.sh
new file mode 100644
index 0000000..4e8b968
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/addMysqlUser.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+mysqldservice=$1
+mysqldbuser=$2
+mysqldbpasswd=$3
+userhost=$4
+
+sudo service $mysqldservice start
+echo "Adding user $mysqldbuser@% and removing users with empty name"
+sudo su mysql -s /bin/bash - -c "mysql -u root -e \"CREATE USER '$mysqldbuser'@'%' IDENTIFIED BY '$mysqldbpasswd';\""
+sudo su mysql -s /bin/bash - -c "mysql -u root -e \"GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'%';\""
+sudo su mysql -s /bin/bash - -c "mysql -u root -e \"DELETE FROM mysql.user WHERE user='';\""
+sudo su mysql -s /bin/bash - -c "mysql -u root -e \"flush privileges;\""
+

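The script takes four positional arguments: mysqld service name, database user, password, and a user host (note the grant itself is issued to '%', so the fourth argument is effectively unused here). A sketch invocation with illustrative values:

  bash addMysqlUser.sh mysqld hive hivepassword c6402.ambari.apache.org
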
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/alert_hive_thrift_port.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/alert_hive_thrift_port.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/alert_hive_thrift_port.py
new file mode 100644
index 0000000..499640f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/alert_hive_thrift_port.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import json
+import socket
+import time
+import traceback
+import urllib2
+from resource_management.libraries.functions import hive_check
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.core.environment import Environment
+
+OK_MESSAGE = "TCP OK - %.4f response on port %s"
+CRITICAL_MESSAGE = "Connection failed on host {0}:{1}"
+
+HIVE_SERVER_THRIFT_PORT_KEY = '{{hive-site/hive.server2.thrift.port}}'
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+HIVE_SERVER2_AUTHENTICATION_KEY = '{{hive-site/hive.server2.authentication}}'
+HIVE_SERVER_PRINCIPAL_KEY = '{{hive-site/hive.server2.authentication.kerberos.principal}}'
+SMOKEUSER_KEYTAB_KEY = '{{cluster-env/smokeuser_keytab}}'
+SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
+
+PERCENT_WARNING = 200
+PERCENT_CRITICAL = 200
+
+THRIFT_PORT_DEFAULT = 10000
+HIVE_SERVER_PRINCIPAL_DEFAULT = 'hive/_HOST@EXAMPLE.COM'
+HIVE_SERVER2_AUTHENTICATION_DEFAULT = 'NOSASL'
+SMOKEUSER_KEYTAB_DEFAULT = '/etc/security/keytabs/smokeuser.headless.keytab'
+SMOKEUSER_DEFAULT = 'ambari-qa'
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (HIVE_SERVER_THRIFT_PORT_KEY,SECURITY_ENABLED_KEY,HIVE_SERVER2_AUTHENTICATION_KEY,HIVE_SERVER_PRINCIPAL_KEY,SMOKEUSER_KEYTAB_KEY,SMOKEUSER_KEY)
+  
+
+def execute(parameters=None, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  parameters (dictionary): a mapping of parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  if parameters is None:
+    return (('UNKNOWN', ['There were no parameters supplied to the script.']))
+
+  thrift_port = THRIFT_PORT_DEFAULT
+  if HIVE_SERVER_THRIFT_PORT_KEY in parameters:
+    thrift_port = int(parameters[HIVE_SERVER_THRIFT_PORT_KEY])  
+
+  security_enabled = False
+  if SECURITY_ENABLED_KEY in parameters:
+    security_enabled = str(parameters[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
+
+  hive_server2_authentication = HIVE_SERVER2_AUTHENTICATION_DEFAULT
+  if HIVE_SERVER2_AUTHENTICATION_KEY in parameters:
+    hive_server2_authentication = parameters[HIVE_SERVER2_AUTHENTICATION_KEY]
+
+  smokeuser = SMOKEUSER_DEFAULT
+  if SMOKEUSER_KEY in parameters:
+    smokeuser = parameters[SMOKEUSER_KEY]
+
+  result_code = None
+
+  if security_enabled:
+    hive_server_principal = HIVE_SERVER_PRINCIPAL_DEFAULT
+    if HIVE_SERVER_PRINCIPAL_KEY in parameters:
+      hive_server_principal = parameters[HIVE_SERVER_PRINCIPAL_KEY]
+    smokeuser_keytab = SMOKEUSER_KEYTAB_DEFAULT
+    if SMOKEUSER_KEYTAB_KEY in parameters:
+      smokeuser_keytab = parameters[SMOKEUSER_KEYTAB_KEY]
+    with Environment() as env:
+      kinit_path_local = get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+      kinitcmd=format("{kinit_path_local} -kt {smokeuser_keytab} {smokeuser}; ")
+  else:
+    hive_server_principal = None
+    kinitcmd=None
+
+  try:
+    if host_name is None:
+      host_name = socket.getfqdn()
+
+    start_time = time.time()
+    try:
+      with Environment() as env:
+        hive_check.check_thrift_port_sasl(host_name, thrift_port, hive_server2_authentication,
+                                          hive_server_principal, kinitcmd, smokeuser)
+      is_thrift_port_ok = True
+    except:
+      is_thrift_port_ok = False
+
+    if is_thrift_port_ok:
+      result_code = 'OK'
+      total_time = time.time() - start_time
+      label = OK_MESSAGE % (total_time, thrift_port)
+    else:
+      result_code = 'CRITICAL'
+      label = CRITICAL_MESSAGE.format(host_name,thrift_port)
+
+  except Exception, e:
+    label = str(e)
+    result_code = 'UNKNOWN'
+        
+  return ((result_code, [label]))
\ No newline at end of file

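Under security, the check above amounts to a kinit as the smoke user followed by a SASL connection attempt on the thrift port. By hand that is roughly the following (a sketch using the script's defaults; the real probe is delegated to hive_check.check_thrift_port_sasl, and the beeline connection string here is illustrative):

  kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa
  beeline -u "jdbc:hive2://$(hostname -f):10000/;principal=hive/_HOST@EXAMPLE.COM" -e 'show databases;'
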
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/alert_webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/alert_webhcat_server.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/alert_webhcat_server.py
new file mode 100644
index 0000000..44840de
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/alert_webhcat_server.py
@@ -0,0 +1,111 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import json
+import socket
+import time
+import urllib2
+
+RESULT_CODE_OK = 'OK'
+RESULT_CODE_CRITICAL = 'CRITICAL'
+RESULT_CODE_UNKNOWN = 'UNKNOWN'
+
+OK_MESSAGE = 'TCP OK - {0:.4f} response on port {1}'
+CRITICAL_CONNECTION_MESSAGE = 'Connection failed on host {0}:{1}'
+CRITICAL_TEMPLETON_STATUS_MESSAGE = 'WebHCat returned an unexpected status of "{0}"'
+CRITICAL_TEMPLETON_UNKNOWN_JSON_MESSAGE = 'Unable to determine WebHCat health from unexpected JSON response'
+
+TEMPLETON_PORT_KEY = '{{webhcat-site/templeton.port}}'
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+
+TEMPLETON_OK_RESPONSE = 'ok'
+TEMPLETON_PORT_DEFAULT = 50111
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (TEMPLETON_PORT_KEY,SECURITY_ENABLED_KEY)      
+  
+
+def execute(parameters=None, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  parameters (dictionary): a mapping of parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  result_code = RESULT_CODE_UNKNOWN
+
+  if parameters is None:
+    return (result_code, ['There were no parameters supplied to the script.'])
+
+  templeton_port = TEMPLETON_PORT_DEFAULT
+  if TEMPLETON_PORT_KEY in parameters:
+    templeton_port = int(parameters[TEMPLETON_PORT_KEY])  
+
+  security_enabled = False
+  if SECURITY_ENABLED_KEY in parameters:
+    security_enabled = parameters[SECURITY_ENABLED_KEY].lower() == 'true'
+
+  scheme = 'http'
+  if security_enabled is True:
+    scheme = 'https'
+
+  label = ''
+  url_response = None
+  templeton_status = ''
+  total_time = 0
+
+  try:
+    # the alert will always run on the webhcat host
+    if host_name is None:
+      host_name = socket.getfqdn()
+    
+    query = "{0}://{1}:{2}/templeton/v1/status".format(scheme, host_name,
+        templeton_port)
+    
+    # execute the query for the JSON that includes templeton status
+    start_time = time.time()
+    url_response = urllib2.urlopen(query)
+    total_time = time.time() - start_time
+  except:
+    label = CRITICAL_CONNECTION_MESSAGE.format(host_name,templeton_port)
+    return (RESULT_CODE_CRITICAL, [label])
+
+  # URL response received, parse it
+  try:
+    json_response = json.loads(url_response.read())
+    templeton_status = json_response['status']
+  except:
+    return (RESULT_CODE_CRITICAL, [CRITICAL_TEMPLETON_UNKNOWN_JSON_MESSAGE])
+
+  # proper JSON received, compare against known value
+  if templeton_status.lower() == TEMPLETON_OK_RESPONSE:
+    result_code = RESULT_CODE_OK
+    label = OK_MESSAGE.format(total_time, templeton_port)
+  else:
+    result_code = RESULT_CODE_CRITICAL
+    label = CRITICAL_TEMPLETON_STATUS_MESSAGE.format(templeton_status)
+
+  return (result_code, [label])
\ No newline at end of file

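The endpoint polled above can be exercised directly with curl (port is the script's default; a healthy server typically answers with status "ok"):

  curl -s "http://$(hostname -f):50111/templeton/v1/status"
  # typical healthy response: {"status":"ok","version":"v1"}
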
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/hcatSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/hcatSmoke.sh b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/hcatSmoke.sh
new file mode 100644
index 0000000..d1e2ff1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/hcatSmoke.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+export tablename=$1
+
+case "$2" in
+
+prepare)
+  hcat -e "show tables"
+  hcat -e "drop table IF EXISTS ${tablename}"
+  hcat -e "create table ${tablename} ( id INT, name string ) stored as rcfile ;"
+;;
+
+cleanup)
+  hcat -e "drop table IF EXISTS ${tablename}"
+;;
+
+esac

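Typical invocation, mirroring how the smoke test drives the script (the table name is illustrative):

  bash hcatSmoke.sh hcatsmoke_20141217 prepare
  bash hcatSmoke.sh hcatsmoke_20141217 cleanup
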
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/hiveSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/hiveSmoke.sh b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/hiveSmoke.sh
new file mode 100644
index 0000000..f9f2020
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/hiveSmoke.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+export tablename=$1
+echo "CREATE EXTERNAL TABLE IF NOT EXISTS ${tablename} ( foo INT, bar STRING );" | hive
+echo "DESCRIBE ${tablename};" | hive

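Invocation takes only the table name (illustrative value):

  bash hiveSmoke.sh hivesmoke_20141217
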
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/hiveserver2.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/hiveserver2.sql b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/hiveserver2.sql
new file mode 100644
index 0000000..99a3865
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/hiveserver2.sql
@@ -0,0 +1,23 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+CREATE EXTERNAL TABLE IF NOT EXISTS hiveserver2smoke20408 ( foo INT, bar STRING );
+DESCRIBE hiveserver2smoke20408;

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/hiveserver2Smoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/hiveserver2Smoke.sh b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/hiveserver2Smoke.sh
new file mode 100644
index 0000000..77d7b3e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/hiveserver2Smoke.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+smokeout=`/usr/lib/hive/bin/beeline -u $1 -n fakeuser -p fakepwd -d org.apache.hive.jdbc.HiveDriver -e "!run $2" 2>&1| awk '{print}'|grep Error`
+
+if [ "x$smokeout" == "x" ]; then
+  echo "Smoke test of hiveserver2 passed"
+  exit 0
+else
+  echo "Smoke test of hiveserver2 wasnt passed"
+  echo $smokeout
+  exit 1
+fi

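The first argument is the HiveServer2 JDBC URL, the second the SQL file handed to beeline via !run (values are illustrative):

  bash hiveserver2Smoke.sh "jdbc:hive2://c6402.ambari.apache.org:10000" /tmp/hiveserver2.sql
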
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/pigSmoke.sh b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/pigSmoke.sh
new file mode 100644
index 0000000..2e90ac0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/pigSmoke.sh
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+A = load 'passwd' using PigStorage(':');
+B = foreach A generate \$0 as id;
+store B into 'pigsmoke.out';

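Despite the .sh extension this is Pig Latin and is submitted to Pig rather than run by a shell; a sketch run, assuming a passwd file has been staged into the user's HDFS home directory:

  hadoop fs -put /etc/passwd passwd
  pig /tmp/pigSmoke.sh
  hadoop fs -cat pigsmoke.out/part-*
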
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/removeMysqlUser.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/removeMysqlUser.sh b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/removeMysqlUser.sh
new file mode 100644
index 0000000..b035517
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/removeMysqlUser.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+mysqldservice=$1
+mysqldbuser=$2
+userhost=$3
+myhostname=$(hostname -f)
+sudo_prefix="sudo -H -E"
+
+$sudo_prefix service $mysqldservice start
+echo "Removing user $mysqldbuser@$userhost"
+sudo su mysql -s /bin/bash - -c "mysql -u root -e \"DROP USER '$mysqldbuser'@'$userhost';\""
+sudo su mysql -s /bin/bash - -c "mysql -u root -e \"flush privileges;\""
+$sudo_prefix service $mysqldservice stop


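Invocation mirrors addMysqlUser.sh without the password (illustrative values):

  bash removeMysqlUser.sh mysqld hive c6402.ambari.apache.org
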
[08/37] ambari git commit: AMBARI-8695: Common Services: Refactor HDP-2.0.6 HDFS, ZOOKEEPER services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/configuration/zoo.cfg.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/configuration/zoo.cfg.xml b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/configuration/zoo.cfg.xml
new file mode 100644
index 0000000..12e2a00
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/configuration/zoo.cfg.xml
@@ -0,0 +1,62 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>tickTime</name>
+    <value>2000</value>
+    <description>The length of a single tick in milliseconds, which is the basic time unit used by ZooKeeper</description>
+  </property>
+  <property>
+    <name>initLimit</name>
+    <value>10</value>
+    <description>Ticks to allow for sync at Init.</description>
+  </property>
+  <property>
+    <name>syncLimit</name>
+    <value>5</value>
+    <description>Ticks to allow for sync at Runtime.</description>
+  </property>
+  <property>
+    <name>clientPort</name>
+    <value>2181</value>
+    <description>Port for running ZK Server.</description>
+  </property>
+  <property>
+    <name>dataDir</name>
+    <value>/hadoop/zookeeper</value>
+    <description>Data directory for ZooKeeper.</description>
+  </property>
+  <property>
+    <name>autopurge.snapRetainCount</name>
+    <value>30</value>
+    <description>ZooKeeper purge feature retains the autopurge.snapRetainCount
+      most recent snapshots and the corresponding transaction
+      logs in the dataDir and dataLogDir respectively and deletes the rest. </description>
+  </property>
+  <property>
+    <name>autopurge.purgeInterval</name>
+    <value>24</value>
+    <description>The time interval in hours between runs of the purge task.
+      Set to a positive integer (1 and above) to enable auto purging.</description>
+  </property>
+</configuration>
\ No newline at end of file
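
The property map above is not copied into zoo.cfg verbatim; it is rendered through the zoo.cfg.j2 template added later in this commit. A minimal, self-contained Python sketch of that rendering step (the hostnames and the use of plain string formatting instead of Jinja are illustrative assumptions):

# Illustrative only: the real code path renders Template("zoo.cfg.j2")
# with zoo_cfg_properties_map and zookeeper_hosts from params.py.
props = {
    "tickTime": "2000",
    "initLimit": "10",
    "syncLimit": "5",
    "clientPort": "2181",
    "dataDir": "/hadoop/zookeeper",
}
hosts = sorted(["zk1.example.com", "zk2.example.com", "zk3.example.com"])
lines = ["%s=%s" % (k, v) for k, v in sorted(props.items())]
# Mirrors the template loop: server.N=host:2888:3888, with N starting at 1
lines += ["server.%d=%s:2888:3888" % (i + 1, h) for i, h in enumerate(hosts)]
print("\n".join(lines))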

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/configuration/zookeeper-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/configuration/zookeeper-env.xml b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/configuration/zookeeper-env.xml
new file mode 100644
index 0000000..608f504
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/configuration/zookeeper-env.xml
@@ -0,0 +1,61 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>zk_user</name>
+    <value>zookeeper</value>
+    <property-type>USER</property-type>
+    <description>ZooKeeper User.</description>
+  </property>
+  <property>
+    <name>zk_log_dir</name>
+    <value>/var/log/zookeeper</value>
+    <description>ZooKeeper Log Dir</description>
+  </property>
+  <property>
+    <name>zk_pid_dir</name>
+    <value>/var/run/zookeeper</value>
+    <description>ZooKeeper Pid Dir</description>
+  </property>
+
+  
+  <!-- zookeeper-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for zookeeper-env.sh file</description>
+    <value>
+export JAVA_HOME={{java64_home}}
+export ZOOKEEPER_HOME={{zk_home}}
+export ZOO_LOG_DIR={{zk_log_dir}}
+export ZOOPIDFILE={{zk_pid_file}}
+export SERVER_JVMFLAGS={{zk_server_heapsize}}
+export JAVA=$JAVA_HOME/bin/java
+export CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*
+
+{% if security_enabled %}
+export SERVER_JVMFLAGS="$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}"
+export CLIENT_JVMFLAGS="$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}"
+{% endif %}
+    </value>
+  </property>
+</configuration>
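
The content property is a Jinja template that params.py exposes as zk_env_sh_template and zookeeper.py (later in this commit) renders through InlineTemplate. A minimal sketch of the same substitution using plain jinja2, with made-up values:

# Requires the jinja2 package; the values are illustrative, not defaults.
from jinja2 import Template

tmpl = Template("export ZOO_LOG_DIR={{zk_log_dir}}\n"
                "export ZOOPIDFILE={{zk_pid_file}}")
print(tmpl.render(zk_log_dir="/var/log/zookeeper",
                  zk_pid_file="/var/run/zookeeper/zookeeper_server.pid"))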

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/configuration/zookeeper-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/configuration/zookeeper-log4j.xml b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/configuration/zookeeper-log4j.xml
new file mode 100644
index 0000000..6fcf5bc
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/configuration/zookeeper-log4j.xml
@@ -0,0 +1,101 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+#
+# ZooKeeper Logging Configuration
+#
+
+# DEFAULT: console appender only
+log4j.rootLogger=INFO, CONSOLE
+
+# Example with rolling log file
+#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
+
+# Example with rolling log file and tracing
+#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
+
+#
+# Log INFO level and above messages to the console
+#
+log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
+log4j.appender.CONSOLE.Threshold=INFO
+log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
+log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
+
+#
+# Add ROLLINGFILE to rootLogger to get log file output
+#    Log DEBUG level and above messages to a log file
+log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
+log4j.appender.ROLLINGFILE.Threshold=DEBUG
+log4j.appender.ROLLINGFILE.File=zookeeper.log
+
+# Max log file size of 10MB
+log4j.appender.ROLLINGFILE.MaxFileSize=10MB
+# uncomment the next line to limit number of backup files
+#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
+
+log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
+
+
+#
+# Add TRACEFILE to rootLogger to get log file output
+#    Log DEBUG level and above messages to a log file
+log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
+log4j.appender.TRACEFILE.Threshold=TRACE
+log4j.appender.TRACEFILE.File=zookeeper_trace.log
+
+log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
+### Notice we are including log4j's NDC here (%x)
+log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/metainfo.xml
new file mode 100644
index 0000000..0cb65ea
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/metainfo.xml
@@ -0,0 +1,89 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZOOKEEPER</name>
+      <displayName>ZooKeeper</displayName>
+      <comment>Centralized service which provides highly reliable distributed coordination</comment>
+      <version>3.4.5.2.0</version>
+      <components>
+
+        <component>
+          <name>ZOOKEEPER_SERVER</name>
+          <displayName>ZooKeeper Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/zookeeper_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>ZOOKEEPER_CLIENT</name>
+          <displayName>ZooKeeper Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/zookeeper_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>env</type>
+              <fileName>zookeeper-env.sh</fileName>
+              <dictionaryName>zookeeper-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>log4j.properties</fileName>
+              <dictionaryName>zookeeper-log4j</dictionaryName>
+            </configFile>            
+          </configFiles>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>zookeeper</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>zookeeper-log4j</config-type>
+        <config-type>zookeeper-env</config-type>
+        <config-type>zoo.cfg</config-type>
+      </configuration-dependencies>
+      <restartRequiredAfterChange>true</restartRequiredAfterChange>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/files/zkEnv.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/files/zkEnv.sh b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/files/zkEnv.sh
new file mode 100644
index 0000000..fa1b832
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/files/zkEnv.sh
@@ -0,0 +1,96 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script should be sourced into other zookeeper
+# scripts to setup the env variables
+
+# We use ZOOCFGDIR if defined,
+# otherwise we use /etc/zookeeper
+# or the conf directory that is
+# a sibling of this script's directory
+if [ "x$ZOOCFGDIR" = "x" ]
+then
+    if [ -d "/etc/zookeeper" ]
+    then
+        ZOOCFGDIR="/etc/zookeeper"
+    else
+        ZOOCFGDIR="$ZOOBINDIR/../conf"
+    fi
+fi
+
+if [ "x$ZOOCFG" = "x" ]
+then
+    ZOOCFG="zoo.cfg"
+fi
+
+ZOOCFG="$ZOOCFGDIR/$ZOOCFG"
+
+if [ -e "$ZOOCFGDIR/zookeeper-env.sh" ]
+then
+    . "$ZOOCFGDIR/zookeeper-env.sh"
+fi
+
+if [ "x${ZOO_LOG_DIR}" = "x" ]
+then
+    ZOO_LOG_DIR="."
+fi
+
+if [ "x${ZOO_LOG4J_PROP}" = "x" ]
+then
+    ZOO_LOG4J_PROP="INFO,CONSOLE"
+fi
+
+#add the zoocfg dir to classpath
+CLASSPATH="$ZOOCFGDIR:$CLASSPATH"
+
+for i in "$ZOOBINDIR"/../src/java/lib/*.jar
+do
+    CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work in the release
+for i in "$ZOOBINDIR"/../lib/*.jar
+do
+    CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work in the release
+for i in "$ZOOBINDIR"/../zookeeper-*.jar
+do
+    CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work for developers
+for d in "$ZOOBINDIR"/../build/lib/*.jar
+do
+   CLASSPATH="$d:$CLASSPATH"
+done
+
+#make it work for developers
+CLASSPATH="$ZOOBINDIR/../build/classes:$CLASSPATH"
+
+case "`uname`" in
+    CYGWIN*) cygwin=true ;;
+    *) cygwin=false ;;
+esac
+
+if $cygwin
+then
+    CLASSPATH=`cygpath -wp "$CLASSPATH"`
+fi
+
+#echo "CLASSPATH=$CLASSPATH"

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/files/zkServer.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/files/zkServer.sh b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/files/zkServer.sh
new file mode 100644
index 0000000..dd75a58
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/files/zkServer.sh
@@ -0,0 +1,120 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# If this script is run out of /usr/bin or some other system bin directory
+# it should be linked to and not copied. Things like java jar files are found
+# relative to the canonical path of this script.
+#
+
+# See the following page for extensive details on setting
+# up the JVM to accept JMX remote management:
+# http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+# by default we allow local JMX connections
+if [ "x$JMXLOCALONLY" = "x" ]
+then
+    JMXLOCALONLY=false
+fi
+
+if [ "x$JMXDISABLE" = "x" ]
+then
+    echo "JMX enabled by default"
+    # for some reason these two options are necessary on jdk6 on Ubuntu
+    #   according to the docs they are not necessary, but otherwise jconsole
+    #   cannot do a local attach
+    ZOOMAIN="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY org.apache.zookeeper.server.quorum.QuorumPeerMain"
+else
+    echo "JMX disabled by user request"
+    ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
+fi
+
+# Only follow symlinks if readlink supports it
+if readlink -f "$0" > /dev/null 2>&1
+then
+  ZOOBIN=`readlink -f "$0"`
+else
+  ZOOBIN="$0"
+fi
+ZOOBINDIR=`dirname "$ZOOBIN"`
+
+. "$ZOOBINDIR"/zkEnv.sh
+
+if [ "x$2" != "x" ]
+then
+    ZOOCFG="$ZOOCFGDIR/$2"
+fi
+
+if $cygwin
+then
+    ZOOCFG=`cygpath -wp "$ZOOCFG"`
+    # cygwin has a "kill" in the shell itself, gets confused
+    KILL=/bin/kill
+else
+    KILL=kill
+fi
+
+echo "Using config: $ZOOCFG"
+
+ZOOPIDFILE=$(grep dataDir "$ZOOCFG" | sed -e 's/.*=//')/zookeeper_server.pid
+
+
+case $1 in
+start)
+    echo  "Starting zookeeper ... "
+    $JAVA  "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+    -cp "$CLASSPATH" $JVMFLAGS $ZOOMAIN "$ZOOCFG" &
+    /bin/echo -n $! > "$ZOOPIDFILE"
+    echo STARTED
+    ;;
+stop)
+    echo "Stopping zookeeper ... "
+    if [ ! -f "$ZOOPIDFILE" ]
+    then
+    echo "error: could not find file $ZOOPIDFILE"
+    exit 1
+    else
+    $KILL -9 $(cat "$ZOOPIDFILE")
+    rm "$ZOOPIDFILE"
+    echo STOPPED
+    fi
+    ;;
+upgrade)
+    shift
+    echo "upgrading the servers to 3.*"
+    java "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+    -cp "$CLASSPATH" $JVMFLAGS org.apache.zookeeper.server.upgrade.UpgradeMain ${@}
+    echo "Upgrading ... "
+    ;;
+restart)
+    shift
+    "$0" stop ${@}
+    sleep 3
+    "$0" start ${@}
+    ;;
+status)
+    STAT=`echo stat | nc localhost $(grep clientPort "$ZOOCFG" | sed -e 's/.*=//') 2> /dev/null| grep Mode`
+    if [ "x$STAT" = "x" ]
+    then
+        echo "Error contacting service. It is probably not running."
+    else
+        echo $STAT
+    fi
+    ;;
+*)
+    echo "Usage: $0 {start|stop|restart|status}" >&2
+
+esac
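
The status branch above speaks ZooKeeper's four-letter-word protocol: it sends "stat" over a TCP socket and greps the reply for the Mode line. The same check in Python, for reference (the host and port defaults are assumptions):

import socket

def zk_mode(host="localhost", port=2181):
    # Send the "stat" four-letter word and return the "Mode: ..." line,
    # which is exactly what the shell pipeline above extracts with grep.
    sock = socket.create_connection((host, port), timeout=5)
    try:
        sock.sendall(b"stat")
        data = sock.recv(4096).decode("utf-8", "replace")
    finally:
        sock.close()
    for line in data.splitlines():
        if line.startswith("Mode"):
            return line
    return None  # no Mode line usually means the server is not serving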

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/files/zkService.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/files/zkService.sh b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/files/zkService.sh
new file mode 100644
index 0000000..46296df
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/files/zkService.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+zkcli_script=$1
+user=$2
+conf_dir=$3
+sudo su $user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | $zkcli_script"

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/files/zkSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/files/zkSmoke.sh b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/files/zkSmoke.sh
new file mode 100644
index 0000000..04f5a35
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/files/zkSmoke.sh
@@ -0,0 +1,78 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+zk_cli_shell=$1
+smoke_user=$2
+conf_dir=$3
+client_port=$4
+security_enabled=$5
+kinit_path_local=$6
+smoke_user_keytab=$7
+export ZOOKEEPER_EXIT_CODE=0
+test_output_file=/tmp/zkSmoke.out
+errors_expr="ERROR|Exception"
+acceptable_expr="SecurityException"
+zkhosts=` grep "^\s*server\.[[:digit:]]"  $conf_dir/zoo.cfg  | cut -f 2 -d '=' | cut -f 1 -d ':' | tr '\n' ' ' `
+zk_node1=`echo $zkhosts | tr ' ' '\n' | head -n 1`  
+echo "zk_node1=$zk_node1"
+if [[ $security_enabled == "True" ]]; then
+  kinitcmd="$kinit_path_local -kt $smoke_user_keytab $smoke_user"
+  sudo su $smoke_user -s /bin/bash - -c "$kinitcmd"
+fi
+
+function verify_output() {
+  if [ -f $test_output_file ]; then
+    errors=`grep -E $errors_expr $test_output_file | grep -v $acceptable_expr`
+    if [ "$?" -eq 0 ]; then
+      echo "Error found in the zookeeper smoke test. Exiting."
+      echo $errors
+      exit 1
+    fi
+  fi
+}
+
+# Delete /zk_smoketest znode if exists
+sudo su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ;  echo delete /zk_smoketest | ${zk_cli_shell} -server $zk_node1:$client_port" 2>&1>$test_output_file
+# Create /zk_smoketest znode on one zookeeper server
+sudo su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo create /zk_smoketest smoke_data | ${zk_cli_shell} -server $zk_node1:$client_port" 2>&1>>$test_output_file
+verify_output
+
+for i in $zkhosts ; do
+  echo "Running test on host $i"
+  # Verify the data associated with znode across all the nodes in the zookeeper quorum
+  sudo su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${zk_cli_shell} -server $i:$client_port"
+  sudo su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | ${zk_cli_shell} -server $i:$client_port"
+  output=$(sudo su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${zk_cli_shell} -server $i:$client_port")
+  echo $output | grep smoke_data
+  if [[ $? -ne 0 ]] ; then
+    echo "Data associated with znode /zk_smoketests is not consistent on host $i"
+    ((ZOOKEEPER_EXIT_CODE=$ZOOKEEPER_EXIT_CODE+1))
+  fi
+done
+
+sudo su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo 'delete /zk_smoketest' | ${zk_cli_shell} -server $zk_node1:$client_port"
+if [[ "$ZOOKEEPER_EXIT_CODE" -ne "0" ]] ; then
+  echo "Zookeeper Smoke Test: Failed" 
+else
+   echo "Zookeeper Smoke Test: Passed" 
+fi
+exit $ZOOKEEPER_EXIT_CODE

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/__init__.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/__init__.py
new file mode 100644
index 0000000..35de4bb
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/__init__.py
@@ -0,0 +1,20 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/params.py
new file mode 100644
index 0000000..b52d6e3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/params.py
@@ -0,0 +1,88 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management.libraries.functions.default import default
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+hdp_stack_version = str(config['hostLevelParams']['stack_version'])
+hdp_stack_version = format_hdp_stack_version(hdp_stack_version)
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+
+#hadoop params
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+  zk_home = '/usr/hdp/current/zookeeper-client'
+  zk_bin = '/usr/hdp/current/zookeeper-client/bin'    # TODO Rolling Upgrade, needs to be server binary when starting server daemon...
+  zk_cli_shell = '/usr/hdp/current/zookeeper-client/bin/zkCli.sh'
+else:
+  zk_home = '/usr'
+  zk_bin = '/usr/lib/zookeeper/bin'
+  zk_cli_shell = "/usr/lib/zookeeper/bin/zkCli.sh"
+
+config_dir = "/etc/zookeeper/conf"
+zk_user =  config['configurations']['zookeeper-env']['zk_user']
+hostname = config['hostname']
+user_group = config['configurations']['cluster-env']['user_group']
+zk_env_sh_template = config['configurations']['zookeeper-env']['content']
+
+zk_log_dir = config['configurations']['zookeeper-env']['zk_log_dir']
+zk_data_dir = config['configurations']['zoo.cfg']['dataDir']
+zk_pid_dir = status_params.zk_pid_dir
+zk_pid_file = status_params.zk_pid_file
+zk_server_heapsize = "-Xmx1024m"
+
+clientPort = config['configurations']['zoo.cfg']['clientPort']
+
+if 'zoo.cfg' in config['configurations']:
+  zoo_cfg_properties_map = config['configurations']['zoo.cfg']
+else:
+  zoo_cfg_properties_map = {}
+zoo_cfg_properties_map_length = len(zoo_cfg_properties_map)
+
+zk_principal_name = default("/configurations/zookeeper-env/zookeeper_principal_name", "zookeeper@EXAMPLE.COM")
+zk_principal = zk_principal_name.replace('_HOST',hostname.lower())
+
+java64_home = config['hostLevelParams']['java_home']
+
+zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
+zookeeper_hosts.sort()
+
+zk_keytab_path = config['configurations']['zookeeper-env']['zookeeper_keytab_path']
+zk_server_jaas_file = format("{config_dir}/zookeeper_jaas.conf")
+zk_client_jaas_file = format("{config_dir}/zookeeper_client_jaas.conf")
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+
+#log4j.properties
+if (('zookeeper-log4j' in config['configurations']) and ('content' in config['configurations']['zookeeper-log4j'])):
+  log4j_props = config['configurations']['zookeeper-log4j']['content']
+else:
+  log4j_props = None
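
The zk_home/zk_bin branching above hinges on compare_versions from resource_management, which orders dotted version strings numerically. A rough, self-contained approximation (the real implementation may handle more edge cases):

def compare_versions(a, b):
    # Returns -1, 0, or 1, comparing dotted versions component-wise.
    pa = [int(x) for x in a.split(".")]
    pb = [int(x) for x in b.split(".")]
    return (pa > pb) - (pa < pb)

assert compare_versions("2.2.0.0", "2.2") >= 0  # takes the /usr/hdp/current paths
assert compare_versions("2.0.6", "2.2") < 0     # takes the legacy /usr/lib paths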

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/service_check.py
new file mode 100644
index 0000000..a4efa41
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/service_check.py
@@ -0,0 +1,46 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+class ZookeeperServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    File(format("{tmp_dir}/zkSmoke.sh"),
+         mode=0755,
+         content=StaticFile('zkSmoke.sh')
+    )
+
+    cmd_quorum = format("{tmp_dir}/zkSmoke.sh {zk_cli_shell} {smokeuser} {config_dir} {clientPort} "
+                  "{security_enabled} {kinit_path_local} {smokeUserKeytab}",
+                  smokeUserKeytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
+
+    Execute(cmd_quorum,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            logoutput=True
+    )
+
+if __name__ == "__main__":
+  ZookeeperServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/status_params.py
new file mode 100644
index 0000000..36c5c30
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/status_params.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+zk_pid_dir = config['configurations']['zookeeper-env']['zk_pid_dir']
+zk_pid_file = format("{zk_pid_dir}/zookeeper_server.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper.py
new file mode 100644
index 0000000..d03aaf7
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper.py
@@ -0,0 +1,111 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import os
+
+from resource_management import *
+import sys
+
+
+def zookeeper(type = None):
+  import params
+
+  Directory(params.config_dir,
+            owner=params.zk_user,
+            recursive=True,
+            group=params.user_group
+  )
+
+  File(format("{config_dir}/zookeeper-env.sh"),
+       content=InlineTemplate(params.zk_env_sh_template),
+       owner=params.zk_user,
+       group=params.user_group
+  )
+  
+
+  configFile("zoo.cfg", template_name="zoo.cfg.j2")
+  configFile("configuration.xsl", template_name="configuration.xsl.j2")
+
+  Directory(params.zk_pid_dir,
+            owner=params.zk_user,
+            recursive=True,
+            group=params.user_group
+  )
+
+  Directory(params.zk_log_dir,
+            owner=params.zk_user,
+            recursive=True,
+            group=params.user_group
+  )
+
+  Directory(params.zk_data_dir,
+            owner=params.zk_user,
+            recursive=True,
+            recursive_permission=True,
+            group=params.user_group
+  )
+
+  if type == 'server':
+    myid = str(sorted(params.zookeeper_hosts).index(params.hostname) + 1)
+
+    File(format("{zk_data_dir}/myid"),
+         mode = 0644,
+         content = myid
+    )
+
+  if (params.log4j_props != None):
+    File(format("{params.config_dir}/log4j.properties"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.zk_user,
+         content=params.log4j_props
+    )
+  elif (os.path.exists(format("{params.config_dir}/log4j.properties"))):
+    File(format("{params.config_dir}/log4j.properties"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.zk_user
+    )
+
+  if params.security_enabled:
+    if type == "server":
+      configFile("zookeeper_jaas.conf", template_name="zookeeper_jaas.conf.j2")
+      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")
+    else:
+      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")
+
+  File(format("{config_dir}/zoo_sample.cfg"),
+       owner=params.zk_user,
+       group=params.user_group
+  )
+
+
+def configFile(name, template_name=None):
+  import params
+
+  File(format("{config_dir}/{name}"),
+       content=Template(template_name),
+       owner=params.zk_user,
+       group=params.user_group
+  )
+
+
+
+
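
The myid block above derives each server's quorum id from the sorted host list, so every agent computes the same numbering without coordination. A worked example with hypothetical hostnames:

hosts = ["c6403.ambari.apache.org", "c6401.ambari.apache.org", "c6402.ambari.apache.org"]
hostname = "c6402.ambari.apache.org"           # this agent's host
myid = str(sorted(hosts).index(hostname) + 1)  # ids are 1-based
print(myid)  # -> "2", written to {zk_data_dir}/myid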

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_client.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_client.py
new file mode 100644
index 0000000..4bffac3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_client.py
@@ -0,0 +1,42 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from zookeeper import zookeeper
+
+class ZookeeperClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    zookeeper(type='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  ZookeeperClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_server.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_server.py
new file mode 100644
index 0000000..cc828a4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_server.py
@@ -0,0 +1,100 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import re
+
+from resource_management import *
+from resource_management.libraries.functions import get_unique_id_and_date
+from resource_management.libraries.functions.decorator import retry
+from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.format import format
+from resource_management.core.shell import call
+
+from zookeeper import zookeeper
+from zookeeper_service import zookeeper_service
+
+
+@retry(times=10, sleep_time=2, err_class=Fail)
+def call_and_match_output(command, regex_expression, err_message):
+  """
+  Call the command and performs a regex match on the output for the specified expression.
+  :param command: Command to call
+  :param regex_expression: Regex expression to search in the output
+  """
+  # TODO Rolling Upgrade, does this work in Ubuntu? If it doesn't see dynamic_variable_interpretation.py to see how stdout was redirected
+  # to a temporary file, which was then read.
+  code, out = call(command, verbose=True)
+  if not (out and re.search(regex_expression, out, re.IGNORECASE)):
+    raise Fail(err_message)
+
+
+class ZookeeperServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    zookeeper(type='server')
+
+  def pre_rolling_restart(self, env):
+    Logger.info("Executing Rolling Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+      Execute(format("hdp-select set zookeeper-server {version}"))
+
+  def start(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    zookeeper_service(action = 'start')
+
+  def post_rolling_restart(self, env):
+    Logger.info("Executing Rolling Upgrade post-restart")
+    import params
+    env.set_params(params)
+
+    # Ensure that a quorum is still formed.
+    unique = get_unique_id_and_date()
+    create_command = format("echo 'create /{unique} mydata' | {zk_cli_shell}")
+    list_command = format("echo 'ls /' | {zk_cli_shell}")
+    delete_command = format("echo 'delete /{unique} ' | {zk_cli_shell}")
+
+    quorum_err_message = "Failed to establish zookeeper quorum"
+    call_and_match_output(create_command, 'Created', quorum_err_message)
+    call_and_match_output(list_command, r"\[.*?" + unique + ".*?\]", quorum_err_message)
+    call(delete_command, verbose=True)
+
+  def stop(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+    zookeeper_service(action = 'stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.zk_pid_file)
+
+if __name__ == "__main__":
+  ZookeeperServer().execute()
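
call_and_match_output leans on the @retry decorator to tolerate a quorum that is still re-forming right after a restart. A minimal sketch of that decorator pattern (the real one lives in resource_management.libraries.functions.decorator and may differ in detail):

import time

def retry(times=10, sleep_time=2, err_class=Exception):
    def decorator(fn):
        def wrapper(*args, **kwargs):
            for attempt in range(times):
                try:
                    return fn(*args, **kwargs)
                except err_class:
                    if attempt == times - 1:
                        raise       # attempts exhausted, propagate
                    time.sleep(sleep_time)
        return wrapper
    return decorator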

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_service.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_service.py
new file mode 100644
index 0000000..1495163
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_service.py
@@ -0,0 +1,50 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+def zookeeper_service(action='start'):
+  import params
+
+  cmd = format("env ZOOCFGDIR={config_dir} ZOOCFG=zoo.cfg {zk_bin}/zkServer.sh")
+
+  if action == 'start':
+    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} start")
+    no_op_test = format("ls {zk_pid_file} >/dev/null 2>&1 && ps -p `cat {zk_pid_file}` >/dev/null 2>&1")
+    Execute(daemon_cmd,
+            not_if=no_op_test,
+            user=params.zk_user
+    )
+
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
+
+      Execute(kinit_cmd,
+              user=params.smokeuser
+      )
+
+  elif action == 'stop':
+    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} stop")
+    rm_pid = format("rm -f {zk_pid_file}")
+    Execute(daemon_cmd,
+            user=params.zk_user
+    )
+    Execute(rm_pid)
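
The no_op_test string above is the shell encoding of "the pid file exists and that pid is alive", which is what keeps a second start from spawning a duplicate server. The same check in plain Python, for reference (the path is whatever zk_pid_file resolves to):

import os

def is_running(pid_file):
    try:
        with open(pid_file) as f:
            pid = int(f.read().strip())
    except (IOError, ValueError):
        return False        # no pid file, or garbage content
    try:
        os.kill(pid, 0)     # signal 0 checks existence without killing
        return True
    except OSError:
        return False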

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/templates/configuration.xsl.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/templates/configuration.xsl.j2 b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/templates/configuration.xsl.j2
new file mode 100644
index 0000000..8830c45
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/templates/configuration.xsl.j2
@@ -0,0 +1,42 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+<?xml version="1.0"?>
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:output method="html"/>
+<xsl:template match="configuration">
+<html>
+<body>
+<table border="1">
+<tr>
+ <td>name</td>
+ <td>value</td>
+ <td>description</td>
+</tr>
+<xsl:for-each select="property">
+  <tr>
+     <td><a name="{name}"><xsl:value-of select="name"/></a></td>
+     <td><xsl:value-of select="value"/></td>
+     <td><xsl:value-of select="description"/></td>
+  </tr>
+</xsl:for-each>
+</table>
+</body>
+</html>
+</xsl:template>
+</xsl:stylesheet>

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/templates/zoo.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/templates/zoo.cfg.j2 b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/templates/zoo.cfg.j2
new file mode 100644
index 0000000..1fbc0cc
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/templates/zoo.cfg.j2
@@ -0,0 +1,53 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+{% for key, value in zoo_cfg_properties_map.iteritems() -%}
+  {{key}}={{value}}
+{% endfor %}
+{% for host in zookeeper_hosts -%}
+server.{{loop.index}}={{host}}:2888:3888
+{% endfor %}
+
+{% if security_enabled -%}
+authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
+jaasLoginRenew=3600000
+kerberos.removeHostFromPrincipal=true
+kerberos.removeRealmFromPrincipal=true
+{% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/templates/zookeeper_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/templates/zookeeper_client_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/templates/zookeeper_client_jaas.conf.j2
new file mode 100644
index 0000000..38f9721
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/templates/zookeeper_client_jaas.conf.j2
@@ -0,0 +1,23 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=false
+useTicketCache=true;
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/templates/zookeeper_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/templates/zookeeper_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/templates/zookeeper_jaas.conf.j2
new file mode 100644
index 0000000..c3e9505
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/templates/zookeeper_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Server {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{zk_keytab_path}}"
+principal="{{zk_principal}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/alerts.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/alerts.json
deleted file mode 100644
index c731d75..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/alerts.json
+++ /dev/null
@@ -1,529 +0,0 @@
-{
-  "HDFS":{
-    "service": [
-      {
-        "name": "datanode_process_percent",
-        "label": "Percent DataNodes Available",
-        "description": "This alert is triggered if the number of down DataNodes in the cluster is greater than the configured critical threshold. It aggregates the results of DataNode process checks.",
-        "interval": 1,
-        "scope": "SERVICE",
-        "enabled": true,
-        "source": {
-          "type": "AGGREGATE",
-          "alert_name": "datanode_process",
-          "reporting": {
-            "ok": {
-              "text": "affected: [{1}], total: [{0}]"
-            },
-            "warning": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 0.1
-            },
-            "critical": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 0.3
-            }
-          }
-        }
-      },
-      {
-        "name": "datanode_storage_percent",
-        "label": "Percent DataNodes With Available Space",
-        "description": "This service-level alert is triggered if the storage on a certain percentage of DataNodes exceeds either the warning or critical threshold values.",
-        "interval": 1,
-        "scope": "SERVICE",
-        "enabled": true,
-        "source": {
-          "type": "AGGREGATE",
-          "alert_name": "datanode_storage",
-          "reporting": {
-            "ok": {
-              "text": "affected: [{1}], total: [{0}]"
-            },
-            "warning": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 0.1
-            },
-            "critical": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 0.3
-            }
-          }
-        }
-      },
-      {
-        "name": "journalnode_process_percent",
-        "label": "Percent JournalNodes Available",
-        "description": "This alert is triggered if the number of down JournalNodes in the cluster is greater than the configured critical threshold. It aggregates the results of JournalNode process checks.",
-        "interval": 1,
-        "scope": "SERVICE",
-        "enabled": true,
-        "source": {
-          "type": "AGGREGATE",
-          "alert_name": "journalnode_process",
-          "reporting": {
-            "ok": {
-              "text": "affected: [{1}], total: [{0}]"
-            },
-            "warning": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 0.33
-            },
-            "critical": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 0.50
-            }
-          }
-        }
-      }
-    ],
-    "NAMENODE": [
-      {
-        "name": "namenode_webui",
-        "label": "NameNode Web UI",
-        "description": "This host-level alert is triggered if the NameNode Web UI is unreachable.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "WEB",
-          "uri": {
-            "http": "{{hdfs-site/dfs.namenode.http-address}}",
-            "https": "{{hdfs-site/dfs.namenode.https-address}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY"
-          },
-          "reporting": {
-            "ok": {
-              "text": "HTTP {0} response in {2:.4f} seconds"
-            },
-            "warning":{
-              "text": "HTTP {0} response in {2:.4f} seconds"
-            },
-            "critical": {
-              "text": "Connection failed to {1}"
-            }
-          }
-        }
-      },
-      {
-        "name": "namenode_cpu",
-        "label": "NameNode Host CPU Utilization",
-        "description": "This host-level alert is triggered if CPU utilization of the NameNode exceeds certain warning and critical thresholds. It checks the NameNode JMX Servlet for the SystemCPULoad property. The threshold values are in percent.",
-        "interval": 5,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{hdfs-site/dfs.namenode.http-address}}",
-            "https": "{{hdfs-site/dfs.namenode.https-address}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY"
-          },
-          "reporting": {
-            "ok": {
-              "text": "{1} CPU, load {0:.1%}"
-            },
-            "warning": {
-              "text": "{1} CPU, load {0:.1%}",
-              "value": 200
-            },
-            "critical": {
-              "text": "{1} CPU, load {0:.1%}",
-              "value": 250
-            },
-            "units" : "%"
-          },
-          "jmx": {
-            "property_list": [
-              "java.lang:type=OperatingSystem/SystemCpuLoad",
-              "java.lang:type=OperatingSystem/AvailableProcessors"
-            ],
-            "value": "{0} * 100"
-          }
-        }
-      },
-      {
-        "name": "namenode_hdfs_blocks_health",
-        "label": "NameNode Blocks Health",
-        "description": "This service-level alert is triggered if the number of corrupt or missing blocks exceeds the configured critical threshold. The threshold values are in blocks.",
-        "interval": 2,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{hdfs-site/dfs.namenode.http-address}}",
-            "https": "{{hdfs-site/dfs.namenode.https-address}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY"
-          },
-          "reporting": {
-            "ok": {
-              "text": "Total Blocks:[{1}], Missing Blocks:[{0}]"
-            },
-            "warning": {
-              "text": "Total Blocks:[{1}], Missing Blocks:[{0}]",
-              "value": 1
-            },          
-            "critical": {
-              "text": "Total Blocks:[{1}], Missing Blocks:[{0}]",
-              "value": 1
-            },
-            "units" : "Blocks"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=NameNode,name=FSNamesystem/MissingBlocks",
-              "Hadoop:service=NameNode,name=FSNamesystem/BlocksTotal"
-            ],
-            "value": "{0}"
-          }
-        }
-      },
-      {
-        "name": "namenode_hdfs_capacity_utilization",
-        "label": "HDFS Capacity Utilization",
-        "description": "This service-level alert is triggered if the HDFS capacity utilization exceeds the configured warning and critical thresholds. It checks the NameNode JMX Servlet for the CapacityUsed and CapacityRemaining properties. The threshold values are in percent.",
-        "interval": 2,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{hdfs-site/dfs.namenode.http-address}}",
-            "https": "{{hdfs-site/dfs.namenode.https-address}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY"
-          },
-          "reporting": {
-            "ok": {
-              "text": "Capacity Used:[{2:d}%, {0}], Capacity Remaining:[{1}]"
-            },
-            "warning": {
-              "text": "Capacity Used:[{2:d}%, {0}], Capacity Remaining:[{1}]",
-              "value": 80
-            },          
-            "critical": {
-              "text": "Capacity Used:[{2:d}%, {0}], Capacity Remaining:[{1}]",
-              "value": 90
-            },
-            "units" : "%"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=NameNode,name=FSNamesystemState/CapacityUsed",
-              "Hadoop:service=NameNode,name=FSNamesystemState/CapacityRemaining"
-            ],
-            "value": "{0}/({0} + {1}) * 100"
-          }
-        }
-      },
-      {
-        "name": "namenode_rpc_latency",
-        "label": "NameNode RPC Latency",
-        "description": "This host-level alert is triggered if the NameNode RPC latency exceeds the configured critical threshold. Typically an increase in the RPC processing time increases the RPC queue length, causing the average queue wait time to increase for NameNode operations. The threshold values are in milliseconds.",
-        "interval": 2,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{hdfs-site/dfs.namenode.http-address}}",
-            "https": "{{hdfs-site/dfs.namenode.https-address}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY"
-          },
-          "reporting": {
-            "ok": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]"
-            },
-            "warning": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
-              "value": 3000
-            },          
-            "critical": {
-              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
-              "value": 5000
-            },
-            "units" : "ms"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=NameNode,name=RpcActivityForPort*/RpcQueueTimeAvgTime",
-              "Hadoop:service=NameNode,name=RpcActivityForPort*/RpcProcessingTimeAvgTime"
-            ],
-            "value": "{0}"
-          }
-        }
-      },
-      {
-        "name": "namenode_directory_status",
-        "label": "NameNode Directory Status",
-        "description": "This host-level alert is triggered if any of the the NameNode's NameDirStatuses metric reports a failed directory. The threshold values are in the number of directories that are not healthy",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{hdfs-site/dfs.namenode.http-address}}",
-            "https": "{{hdfs-site/dfs.namenode.https-address}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY"
-          },
-          "reporting": {
-            "ok": {
-              "text": "Directories are healthy"
-            },
-            "warning": {
-              "text": "Failed directory count: {1}",
-              "value": 1
-            },          
-            "critical": {
-              "text": "Failed directory count: {1}",
-              "value": 1
-            },
-            "units" : "Dirs"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=NameNode,name=NameNodeInfo/NameDirStatuses"
-            ],
-            "value": "calculate(args)\ndef calculate(args):\n  import json\n  json_statuses = json.loads({0})\n  return len(json_statuses['failed']) if 'failed' in json_statuses else 0"
-          }
-        }
-      },
-      {
-        "name": "namenode_process",
-        "label": "NameNode Process",
-        "description": "This host-level alert is triggered if the NameNode process cannot be confirmed to be up and listening on the network.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "PORT",
-          "uri": "{{hdfs-site/dfs.namenode.http-address}}",
-          "default_port": 50070,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      },
-      {
-        "name": "namenode_last_checkpoint",
-        "label": "NameNode Last Checkpoint",
-        "description": "This service-level alert will trigger if the last time that the NameNode performed a checkpoint was too long ago. It will also trigger if the number of uncommitted transactions is beyond a certain threshold.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDP/2.0.6/services/HDFS/package/files/alert_checkpoint_time.py"
-        }
-      },
-      {
-        "name": "namenode_ha_health",
-        "label": "NameNode High Availability Health",
-        "description": "This service-level alert is triggered if either the Active NameNode or Standby NameNode are not running.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "ignore_host": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDP/2.0.6/services/HDFS/package/files/alert_ha_namenode_health.py"
-        }
-      }
-    ],
-    "SECONDARY_NAMENODE": [
-      {
-        "name": "secondary_namenode_process",
-        "label": "Secondary NameNode Process",
-        "description": "This host-level alert is triggered if the Secondary NameNode process cannot be confirmed to be up and listening on the network.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "PORT",
-          "uri": "{{hdfs-site/dfs.namenode.secondary.http-address}}",
-          "default_port": 50071,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      }
-    ],
-    "JOURNALNODE": [
-      {
-        "name": "journalnode_process",
-        "label": "JournalNode Process",
-        "description": "This host-level alert is triggered if the JournalNode process cannot be confirmed to be up and listening on the network.",
-        "interval": 1,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "PORT",        
-          "uri": "{{hdfs-site/dfs.journalnode.http-address}}",
-          "default_port": 8480,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      }
-    ],      
-    "DATANODE": [
-      {
-        "name": "datanode_process",
-        "label": "DataNode Process",
-        "description": "This host-level alert is triggered if the individual DataNode processes cannot be established to be up and listening on the network.",
-        "interval": 1,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "PORT",        
-          "uri": "{{hdfs-site/dfs.datanode.address}}",
-          "default_port": 50010,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      },
-      {
-        "name": "datanode_webui",
-        "label": "DataNode Web UI",
-        "description": "This host-level alert is triggered if the DataNode Web UI is unreachable.",
-        "interval": 1,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "WEB",
-          "uri": {
-            "http": "{{hdfs-site/dfs.datanode.http.address}}",
-            "https": "{{hdfs-site/dfs.datanode.https.address}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY"
-          },
-          "reporting": {
-            "ok": {
-              "text": "HTTP {0} response in {2:.4f} seconds"
-            },
-            "warning":{
-              "text": "HTTP {0} response in {2:.4f} seconds"
-            },
-            "critical": {
-              "text": "Connection failed to {1}"
-            }
-          }
-        }
-      },    
-      {
-        "name": "datanode_storage",
-        "label": "DataNode Storage",
-        "description": "This host-level alert is triggered if storage capacity if full on the DataNode. It checks the DataNode JMX Servlet for the Capacity and Remaining properties. The threshold values are in percent.",
-        "interval": 2,
-        "scope": "HOST",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{hdfs-site/dfs.datanode.http.address}}",
-            "https": "{{hdfs-site/dfs.datanode.https.address}}",
-            "https_property": "{{hdfs-site/dfs.http.policy}}",
-            "https_property_value": "HTTPS_ONLY"
-          },
-          "reporting": {
-            "ok": {
-              "text": "Remaining Capacity:[{0}], Total Capacity:[{2:d}% Used, {1}]"
-            },
-            "warning": {
-              "text": "Remaining Capacity:[{0}], Total Capacity:[{2:d}% Used, {1}]",
-              "value": 80
-            },
-            "critical": {
-              "text": "Remaining Capacity:[{0}], Total Capacity:[{2:d}% Used, {1}]",
-              "value": 90
-            },
-            "units" : "%"
-          },
-          "jmx": {
-            "property_list": [
-              "Hadoop:service=DataNode,name=FSDatasetState-*/Remaining",
-              "Hadoop:service=DataNode,name=FSDatasetState-*/Capacity"
-            ],
-            "value": "({1} - {0})/{1} * 100"
-          }
-        }
-      }    
-    ],
-    "ZKFC": [
-      {
-        "name": "hdfs_zookeeper_failover_controller_process",
-        "label": "ZooKeeper Failover Controller Process",
-        "description": "This host-level alert is triggered if the ZooKeeper Failover Controller process cannot be confirmed to be up and listening on the network.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "PORT",        
-          "uri": "{{core-site/ha.zookeeper.quorum}}",
-          "default_port": 2181,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      }
-    ]
-  }
-}
\ No newline at end of file
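
The AGGREGATE alerts at the top of this file compare the fraction of affected hosts against the warning and critical values (0.1/0.3 for the DataNode checks). A minimal sketch of that evaluation, mirroring the reporting thresholds and text template; the function and its inputs are assumptions, not Ambari's actual alert engine:

# Mirrors the reporting fractions above; not Ambari's alert engine.
def evaluate_aggregate(affected, total, warning=0.1, critical=0.3):
    ratio = float(affected) / total if total else 0.0
    if ratio >= critical:
        state = "CRITICAL"
    elif ratio >= warning:
        state = "WARNING"
    else:
        state = "OK"
    # Rendered like the alert text template "affected: [{1}], total: [{0}]"
    return state, "affected: [%d], total: [%d]" % (affected, total)

print(evaluate_aggregate(2, 10))  # -> ('WARNING', 'affected: [2], total: [10]')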

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/core-site.xml
deleted file mode 100644
index 6c72e9d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/core-site.xml
+++ /dev/null
@@ -1,180 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
- <!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
- 
-        http://www.apache.org/licenses/LICENSE-2.0
- 
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
- -->
- 
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-    <description>The size of buffer for use in sequence files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</description>
-  </property>
-
-  <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-    <description> A list of comma-delimited serialization classes that can be used for obtaining serializers and deserializers.
-    </description>
-  </property>
-
-  <property>
-    <name>io.compression.codecs</name>
-    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
-    <description>A list of the compression codec classes that can be used
-                 for compression/decompression.</description>
-  </property>
-
-<!-- file system properties -->
-
-  <property>
-    <name>fs.defaultFS</name>
-    <!-- cluster variant -->
-    <value>hdfs://localhost:8020</value>
-    <description>The name of the default file system.  Either the
-  literal string "local" or a host:port for NDFS.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>fs.trash.interval</name>
-    <value>360</value>
-    <description>Number of minutes between trash checkpoints.
-  If zero, the trash feature is disabled.
-  </description>
-  </property>
-
-  <!-- ipc properties: copied from kryptonite configuration -->
-  <property>
-    <name>ipc.client.idlethreshold</name>
-    <value>8000</value>
-    <description>Defines the threshold number of connections after which
-               connections will be inspected for idleness.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connection.maxidletime</name>
-    <value>30000</value>
-    <description>The maximum time after which a client will bring down the
-               connection to the server.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connect.max.retries</name>
-    <value>50</value>
-    <description>Defines the maximum number of retries for IPC connections.</description>
-  </property>
-
-  <property>
-    <name>ipc.server.tcpnodelay</name>
-    <value>true</value>
-    <description>Turn on/off Nagle's algorithm for the TCP socket
-      connection on
-      the server. Setting to true disables the algorithm and may
-      decrease latency
-      with a cost of more/smaller packets.
-    </description>
-  </property>
-
-  <!-- Web Interface Configuration -->
-  <property>
-    <name>mapreduce.jobtracker.webinterface.trusted</name>
-    <value>false</value>
-    <description> If set to true, the web interfaces of JT and NN may contain
-                actions, such as kill job, delete file, etc., that should
-                not be exposed to the public. Enable this option if the interfaces
-                are only reachable by those who have the right authorization.
-  </description>
-  </property>
-
- <property>
-   <name>hadoop.security.authentication</name>
-   <value>simple</value>
-   <description>
-   Set the authentication for the cluster. Valid values are: simple or
-   kerberos.
-   </description>
- </property>
-<property>
-  <name>hadoop.security.authorization</name>
-  <value>false</value>
-  <description>
-     Enable authorization for different protocols.
-  </description>
-</property>
-
-  <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value>
-        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/
-        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/
-        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/
-        RULE:[2:$1@$0](hm@.*)s/.*/hbase/
-        RULE:[2:$1@$0](rs@.*)s/.*/hbase/
-        DEFAULT
-    </value>
-<description>The mapping from kerberos principal names to local OS mapreduce.job.user.names.
-  So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
-  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
-The translations rules have 3 sections:
-      base     filter    substitution
-The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
-
-[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
-[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
-[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
-
-The filter is a regex in parens that must match the generated string for the rule to apply.
-
-"(.*%admin)" will take any string that ends in "%admin"
-"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
-
-Finally, the substitution is a sed rule to translate a regex into a fixed string.
-
-"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
-"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
-"s/X/Y/g" replaces all of the "X" in the name with "Y"
-
-So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
-
-RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-DEFAULT
-
-To also translate the names with a second component, you'd make the rules:
-
-RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-RULE:[2:$1@$0](.*@ACME.COM)s/@.*//
-DEFAULT
-
-If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
-
-RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
-DEFAULT
-    </description>
-  </property>
-
-</configuration>
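
The auth_to_local description above walks through the base/filter/substitution mechanics. A minimal sketch under a simplified rule model (the authoritative logic is Hadoop's KerberosName class), applying the bundled RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/ to a ResourceManager principal:

# A simplified model of the RULE mechanics described above.
import re

def apply_rule(principal, base_pattern, filter_re, sed_find, sed_repl):
    name, realm = principal.split("@", 1)
    components = name.split("/")
    short = base_pattern.replace("$0", realm)          # $0 is the realm
    for i, comp in enumerate(components, start=1):     # $1, $2, ... are components
        short = short.replace("$%d" % i, comp)
    if not re.match(filter_re + "$", short):
        return None                                    # filter did not match
    return re.sub(sed_find, sed_repl, short, count=1)  # sed replaces first match

# RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/ applied to a ResourceManager principal:
print(apply_rule("rm/host1@EXAMPLE.COM", "$1@$0", "[rn]m@.*", ".*", "yarn"))  # yarn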


[31/37] ambari git commit: AMBARI-8746: Common Services: Refactor HDP 2.0.6 HIVE, OOZIE services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/startMetastore.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/startMetastore.sh b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/startMetastore.sh
new file mode 100644
index 0000000..da0f60b
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/startMetastore.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+HIVE_CONF_DIR=$4 hive --service metastore -hiveconf hive.log.file=hivemetastore.log -hiveconf hive.log.dir=$5 > $1 2> $2 &
+echo $!|cat>$3
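
Judging from the body above, the script's positional arguments are: $1 stdout log, $2 stderr log, $3 pid file, $4 HIVE_CONF_DIR, $5 hive.log.dir. A hedged sketch of an invocation; the paths are illustrative, the real values come from the service's params.py:

# A hedged sketch; paths are illustrative, not Ambari's actual values.
import subprocess

subprocess.call([
    "bash", "startMetastore.sh",
    "/var/log/hive/hive.out",   # $1 stdout log
    "/var/log/hive/hive.log",   # $2 stderr log
    "/var/run/hive/hive.pid",   # $3 pid file, written by `echo $!|cat>$3`
    "/etc/hive/conf.server",    # $4 exported as HIVE_CONF_DIR
    "/var/log/hive",            # $5 passed as hive.log.dir
])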

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/templetonSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/templetonSmoke.sh b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/templetonSmoke.sh
new file mode 100644
index 0000000..22202ee
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/files/templetonSmoke.sh
@@ -0,0 +1,94 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export ttonhost=$1
+export smoke_test_user=$2
+export smoke_user_keytab=$3
+export security_enabled=$4
+export kinit_path_local=$5
+export ttonurl="http://${ttonhost}:50111/templeton/v1"
+
+if [[ $security_enabled == "true" ]]; then
+  kinitcmd="${kinit_path_local}  -kt ${smoke_user_keytab} ${smoke_test_user}; "
+else
+  kinitcmd=""
+fi
+
+export no_proxy=$ttonhost
+cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>'    $ttonurl/status 2>&1"
+retVal=`sudo su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (status cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit 1
+fi
+
+#try hcat ddl command
+echo "user.name=${smoke_test_user}&exec=show databases;" /tmp/show_db.post.txt
+cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>' -d  \@${destdir}/show_db.post.txt  $ttonurl/ddl 2>&1"
+retVal=`sudo su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (ddl cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit  1
+fi
+
+# Skip the Pig smoke test in secure mode.
+if [[ $security_enabled == "true" ]]; then
+  echo "Templeton Pig Smoke Tests not run in secure mode"
+  exit 0
+fi
+
+#try pig query
+outname=${smoke_test_user}.`date +"%M%d%y"`.$$;
+ttonTestOutput="/tmp/idtest.${outname}.out";
+ttonTestInput="/tmp/idtest.${outname}.in";
+ttonTestScript="idtest.${outname}.pig"
+
+echo "A = load '$ttonTestInput' using PigStorage(':');"  > /tmp/$ttonTestScript
+echo "B = foreach A generate \$0 as id; " >> /tmp/$ttonTestScript
+echo "store B into '$ttonTestOutput';" >> /tmp/$ttonTestScript
+
+#copy pig script to hdfs
+sudo su ${smoke_test_user} -s /bin/bash - -c "hadoop dfs -copyFromLocal /tmp/$ttonTestScript /tmp/$ttonTestScript"
+
+#copy input file to hdfs
+sudo su ${smoke_test_user} -s /bin/bash - -c "hadoop dfs -copyFromLocal /etc/passwd $ttonTestInput"
+
+#create, copy post args file
+echo -n "user.name=${smoke_test_user}&file=/tmp/$ttonTestScript" > /tmp/pig_post.txt
+
+#submit pig query
+cmd="curl -s -w 'http_code <%{http_code}>' -d  \@${destdir}/pig_post.txt  $ttonurl/pig 2>&1"
+retVal=`sudo su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (pig cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit 1
+fi
+
+exit 0
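
The first check the script performs is a GET against WebHCat's status endpoint, expecting HTTP 200. A minimal standalone sketch of the same probe, assuming Python 3; the host arrives as $1 in the script, and the curl there adds --negotiate for Kerberos, which is omitted here:

# A minimal standalone probe of the WebHCat status endpoint.
import urllib.request

ttonhost = "localhost"  # the script receives this as $1
with urllib.request.urlopen("http://%s:50111/templeton/v1/status" % ttonhost,
                            timeout=10) as resp:
    assert resp.status == 200, "status check failed: HTTP %d" % resp.status
    print(resp.read().decode())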

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/metainfo.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/metainfo.xml
new file mode 100644
index 0000000..26edb84
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/metainfo.xml
@@ -0,0 +1,294 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HIVE</name>
+      <displayName>Hive</displayName>
+      <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
+      <version>0.12.0.2.0</version>
+      <components>
+
+        <component>
+          <name>HIVE_METASTORE</name>
+          <displayName>Hive Metastore</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <clientsToUpdateConfigs></clientsToUpdateConfigs>
+          <auto-deploy>
+            <enabled>true</enabled>
+            <co-locate>HIVE/HIVE_SERVER</co-locate>
+          </auto-deploy>
+          <commandScript>
+            <script>scripts/hive_metastore.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configuration-dependencies>
+            <config-type>hive-site</config-type>
+          </configuration-dependencies>
+        </component>
+
+        <component>
+          <name>HIVE_SERVER</name>
+          <displayName>HiveServer2</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <clientsToUpdateConfigs></clientsToUpdateConfigs>
+          <dependencies>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HIVE/HIVE_SERVER</co-locate>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/hive_server.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configuration-dependencies>
+            <config-type>hive-site</config-type>
+          </configuration-dependencies>
+        </component>
+        <component>
+          <name>WEBHCAT_SERVER</name>
+          <displayName>WebHCat Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <clientsToUpdateConfigs>
+            <client>HCAT</client>
+          </clientsToUpdateConfigs>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HIVE/WEBHCAT_SERVER</co-locate>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>PIG/PIG</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/webhcat_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configuration-dependencies>
+            <config-type>hive-site</config-type>
+          </configuration-dependencies>
+        </component>
+        <component>
+          <name>MYSQL_SERVER</name>
+          <displayName>MySQL Server</displayName>
+          <category>MASTER</category>
+          <cardinality>0-1</cardinality>
+          <clientsToUpdateConfigs></clientsToUpdateConfigs>
+          <commandScript>
+            <script>scripts/mysql_server.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>CLEAN</name>
+              <commandScript>
+                <script>scripts/mysql_server.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>HIVE_CLIENT</name>
+          <displayName>Hive Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/hive_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>hive-site.xml</fileName>
+              <dictionaryName>hive-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hive-env.sh</fileName>
+              <dictionaryName>hive-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hive-log4j.properties</fileName>
+              <dictionaryName>hive-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hive-exec-log4j.properties</fileName>
+              <dictionaryName>hive-exec-log4j</dictionaryName>
+            </configFile>
+          </configFiles>
+          <configuration-dependencies>
+            <config-type>hive-site</config-type>
+          </configuration-dependencies>
+        </component>
+        <component>
+          <name>HCAT</name>
+          <displayName>HCat Client</displayName>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/hcat_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>env</type>
+              <fileName>hcat-env.sh</fileName>
+              <dictionaryName>hcat-env</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+        <configuration-dependencies>
+          <config-type>hive-site</config-type>
+        </configuration-dependencies>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hive</name>
+            </package>
+            <package>
+              <name>hcatalog</name>
+            </package>
+            <package>
+              <name>webhcat-tar-hive</name>
+            </package>
+            <package>
+              <name>webhcat-tar-pig</name>
+            </package>
+            <package>
+              <name>mysql-connector-java</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>mysql</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>mysql-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>suse11</osFamily>
+          <packages>
+            <package>
+              <name>mysql-client</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+        <service>YARN</service>
+        <service>TEZ</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>hive-log4j</config-type>
+        <config-type>hive-exec-log4j</config-type>
+        <config-type>hive-env</config-type>
+        <config-type>webhcat-site</config-type>
+        <config-type>webhcat-env</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>
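
For auditing a stack definition like the metainfo.xml above, the component names, categories, and cardinalities can be pulled out with the standard library; the local file path is an assumption:

# Assumes a local copy of the metainfo.xml above.
import xml.etree.ElementTree as ET

root = ET.parse("metainfo.xml").getroot()
for comp in root.iter("component"):
    print("%-16s %-8s %s" % (comp.findtext("name"),
                             comp.findtext("category"),
                             comp.findtext("cardinality", default="-")))
# e.g.  HIVE_METASTORE   MASTER   1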

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/__init__.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/__init__.py
new file mode 100644
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat.py
new file mode 100644
index 0000000..31c1673
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import sys
+
+
+def hcat():
+  import params
+
+  Directory(params.hive_conf_dir,
+            recursive=True,
+            owner=params.hcat_user,
+            group=params.user_group,
+  )
+
+
+  Directory(params.hcat_conf_dir,
+            recursive=True,
+            owner=params.hcat_user,
+            group=params.user_group,
+  )
+
+  Directory(params.hcat_pid_dir,
+            owner=params.webhcat_user,
+            recursive=True
+  )
+
+  XmlConfig("hive-site.xml",
+            conf_dir=params.hive_client_conf_dir,
+            configurations=params.config['configurations']['hive-site'],
+            configuration_attributes=params.config['configuration_attributes']['hive-site'],
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0644)
+
+  File(format("{hcat_conf_dir}/hcat-env.sh"),
+       owner=params.hcat_user,
+       group=params.user_group,
+       content=InlineTemplate(params.hcat_env_sh_template)
+  )
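
The XmlConfig resource above materializes the hive-site dictionary as a Hadoop-style configuration file. A rough emulation of the output shape (an assumption about the format, not resource_management's actual implementation):

# An assumption about the output shape; values are written unescaped for brevity.
def to_hadoop_xml(props):
    lines = ["<configuration>"]
    for name, value in sorted(props.items()):
        lines += ["  <property>",
                  "    <name>%s</name>" % name,
                  "    <value>%s</value>" % value,
                  "  </property>"]
    lines.append("</configuration>")
    return "\n".join(lines)

print(to_hadoop_xml({"hive.metastore.uris": "thrift://myhost:9083"}))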

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat_client.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat_client.py
new file mode 100644
index 0000000..8b5921a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat_client.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from hcat import hcat
+
+class HCatClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+
+    env.set_params(params)
+
+    hcat()
+
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+if __name__ == "__main__":
+  HCatClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat_service_check.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat_service_check.py
new file mode 100644
index 0000000..fd4c6ca
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat_service_check.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions import get_unique_id_and_date
+
+def hcat_service_check():
+    import params
+    unique = get_unique_id_and_date()
+    output_file = format("/apps/hive/warehouse/hcatsmoke{unique}")
+    test_cmd = format("fs -test -e {output_file}")
+
+    if params.security_enabled:
+      kinit_cmd = format(
+        "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser}; ")
+    else:
+      kinit_cmd = ""
+
+    File(format("{tmp_dir}/hcatSmoke.sh"),
+         content=StaticFile("hcatSmoke.sh"),
+         mode=0755
+    )
+
+    prepare_cmd = format("{kinit_cmd}env JAVA_HOME={java64_home} {tmp_dir}/hcatSmoke.sh hcatsmoke{unique} prepare")
+
+    Execute(prepare_cmd,
+            tries=3,
+            user=params.smokeuser,
+            try_sleep=5,
+            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin', params.execute_path],
+            logoutput=True)
+
+    if params.security_enabled:
+      ExecuteHadoop(test_cmd,
+                    user=params.hdfs_user,
+                    logoutput=True,
+                    conf_dir=params.hadoop_conf_dir,
+                    security_enabled=params.security_enabled,
+                    kinit_path_local=params.kinit_path_local,
+                    keytab=params.hdfs_user_keytab,
+                    principal=params.hdfs_principal_name,
+                    bin_dir=params.execute_path
+      )
+    else:
+      ExecuteHadoop(test_cmd,
+                    user=params.hdfs_user,
+                    logoutput=True,
+                    conf_dir=params.hadoop_conf_dir,
+                    security_enabled=params.security_enabled,
+                    kinit_path_local=params.kinit_path_local,
+                    keytab=params.hdfs_user_keytab,
+                    bin_dir=params.execute_path
+      )
+
+    cleanup_cmd = format("{kinit_cmd} {tmp_dir}/hcatSmoke.sh hcatsmoke{unique} cleanup")
+
+    Execute(cleanup_cmd,
+            tries=3,
+            user=params.smokeuser,
+            try_sleep=5,
+            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin', params.execute_path],
+            logoutput=True
+    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
new file mode 100644
index 0000000..2dbdd6f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
@@ -0,0 +1,232 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import sys
+import os
+
+
+def hive(name=None):
+  import params
+
+  if name == 'hiveserver2':
+
+    params.HdfsDirectory(params.hive_apps_whs_dir,
+                         action="create_delayed",
+                         owner=params.hive_user,
+                         mode=0777
+    )
+    params.HdfsDirectory(params.hive_hdfs_user_dir,
+                         action="create_delayed",
+                         owner=params.hive_user,
+                         mode=params.hive_hdfs_user_mode
+    )
+    params.HdfsDirectory(None, action="create")
+
+  Directory(params.hive_conf_dir_prefix,
+            mode=0755
+  )
+
+  # We should change configurations for client as well as for server.
+  # The reason is that stale-configs are service-level, not component.
+  for conf_dir in params.hive_conf_dirs_list:
+    fill_conf_dir(conf_dir)
+
+  XmlConfig("hive-site.xml",
+            conf_dir=params.hive_config_dir,
+            configurations=params.config['configurations']['hive-site'],
+            configuration_attributes=params.config['configuration_attributes']['hive-site'],
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0644)
+  
+  if params.hive_specific_configs_supported and name == 'hiveserver2':
+    XmlConfig("hiveserver2-site.xml",
+              conf_dir=params.hive_server_conf_dir,
+              configurations=params.config['configurations']['hiveserver2-site'],
+              configuration_attributes=params.config['configuration_attributes']['hiveserver2-site'],
+              owner=params.hive_user,
+              group=params.user_group,
+              mode=0644)
+  
+  File(format("{hive_config_dir}/hive-env.sh"),
+       owner=params.hive_user,
+       group=params.user_group,
+       content=InlineTemplate(params.hive_env_sh_template)
+  )
+
+  if name == 'metastore' or name == 'hiveserver2':
+    jdbc_connector()
+    
+  environment = {
+    "no_proxy": format("{ambari_server_hostname}")
+  }
+
+  cmd = format("/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf -x \"\" "
+               "--retry 5 "
+               "{jdk_location}{check_db_connection_jar_name} "
+               "-o {check_db_connection_jar_name}'")
+
+  Execute(cmd,
+          not_if=format("[ -f {check_db_connection_jar} ]"),
+          environment = environment)
+
+  if name == 'metastore':
+    File(params.start_metastore_path,
+         mode=0755,
+         content=StaticFile('startMetastore.sh')
+    )
+    if params.init_metastore_schema:
+      create_schema_cmd = format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
+                                 "{hive_bin}/schematool -initSchema "
+                                 "-dbType {hive_metastore_db_type} "
+                                 "-userName {hive_metastore_user_name} "
+                                 "-passWord {hive_metastore_user_passwd!p}")
+
+      check_schema_created_cmd = format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
+                                        "{hive_bin}/schematool -info "
+                                        "-dbType {hive_metastore_db_type} "
+                                        "-userName {hive_metastore_user_name} "
+                                        "-passWord {hive_metastore_user_passwd!p}")
+
+      Execute(create_schema_cmd,
+              not_if = check_schema_created_cmd
+      )
+  elif name == 'hiveserver2':
+    File(params.start_hiveserver2_path,
+         mode=0755,
+         content=Template(format('{start_hiveserver2_script}'))
+    )
+
+  if name != "client":
+    crt_directory(params.hive_pid_dir)
+    crt_directory(params.hive_log_dir)
+    crt_directory(params.hive_var_lib)
+
+def fill_conf_dir(component_conf_dir):
+  import params
+  
+  Directory(component_conf_dir,
+            owner=params.hive_user,
+            group=params.user_group,
+            recursive=True
+  )
+
+  XmlConfig("mapred-site.xml",
+            conf_dir=component_conf_dir,
+            configurations=params.config['configurations']['mapred-site'],
+            configuration_attributes=params.config['configuration_attributes']['mapred-site'],
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0644)
+
+
+  crt_file(format("{component_conf_dir}/hive-default.xml.template"))
+  crt_file(format("{component_conf_dir}/hive-env.sh.template"))
+
+  log4j_exec_filename = 'hive-exec-log4j.properties'
+  if params.log4j_exec_props is not None:
+    File(format("{component_conf_dir}/{log4j_exec_filename}"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hive_user,
+         content=params.log4j_exec_props
+    )
+  elif os.path.exists(format("{component_conf_dir}/{log4j_exec_filename}.template")):
+    File(format("{component_conf_dir}/{log4j_exec_filename}"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hive_user,
+         content=StaticFile(format("{component_conf_dir}/{log4j_exec_filename}.template"))
+    )
+
+  log4j_filename = 'hive-log4j.properties'
+  if params.log4j_props is not None:
+    File(format("{component_conf_dir}/{log4j_filename}"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hive_user,
+         content=params.log4j_props
+    )
+  elif os.path.exists(format("{component_conf_dir}/{log4j_filename}.template")):
+    File(format("{component_conf_dir}/{log4j_filename}"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hive_user,
+         content=StaticFile(format("{component_conf_dir}/{log4j_filename}.template"))
+    )
+
+
+def crt_directory(name):
+  import params
+
+  Directory(name,
+            recursive=True,
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0755)
+
+
+def crt_file(name):
+  import params
+
+  File(name,
+       owner=params.hive_user,
+       group=params.user_group
+  )
+
+
+def jdbc_connector():
+  import params
+
+  if params.hive_jdbc_driver == "com.mysql.jdbc.Driver":
+    File(params.target,
+         action="delete",
+    )
+    Execute(('cp', format('/usr/share/java/{jdbc_jar_name}'), params.target),
+            not_if=format("test -f {target}"),
+            creates=params.target,
+            path=["/bin", "/usr/bin/"],
+            sudo=True
+    )
+  elif params.hive_jdbc_driver == "org.postgresql.Driver":
+    cmd = format("mkdir -p {artifact_dir} ; rm -f {target} ; cp /usr/share/java/{jdbc_jar_name} {target}")
+
+    Execute(cmd,
+            not_if=format("test -f {target}"),
+            creates=params.target,
+            environment= {'PATH' : params.execute_path },
+            path=["/bin", "usr/bin/"])
+
+  elif params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+    environment = {
+      "no_proxy": format("{ambari_server_hostname}")
+    }
+
+    cmd = format(
+      "mkdir -p {artifact_dir} ; "
+      "curl -kf -x \"\" --retry 10 {driver_curl_source} -o {driver_curl_target} &&  "
+      "rm -f {target} ; "
+      "cp {driver_curl_target} {target}")
+
+    Execute(cmd,
+            not_if=format("test -f {target}"),
+            path=["/bin", "/usr/bin/"],
+            environment=environment)
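
The jdbc_connector() logic and the DBConnectionVerification.jar download above share one idempotence idiom: run a shell command only when its output file is absent (not_if/creates). A minimal stdlib sketch of that guard, with a hypothetical URL and target path standing in for the real parameters, and assuming curl is on the PATH:

    import os
    import subprocess

    def fetch_once(url, target, retries=5):
        # Mirrors the not_if/creates guard: skip when the artifact exists.
        if os.path.isfile(target):
            return
        # Same curl flags as the Execute command above.
        subprocess.check_call(["curl", "-kf", "--retry", str(retries),
                               url, "-o", target])

    fetch_once("http://example.com/DBConnectionVerification.jar",  # hypothetical URL
               "/tmp/DBConnectionVerification.jar")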

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_client.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_client.py
new file mode 100644
index 0000000..499f632
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_client.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+from resource_management import *
+
+from hive import hive
+
+class HiveClient(Script):
+  def install(self, env):
+    import params
+    self.install_packages(env, exclude_packages=params.hive_exclude_packages)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hive(name='client')
+
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  HiveClient().execute()
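
hive_client.py follows the pattern shared by every script in this package: a Script subclass whose public method names double as agent commands. A minimal sketch of that dispatch idiom (an illustration, not resource_management's actual implementation):

    import sys

    class Script(object):
        def execute(self):
            # The agent passes the command name; resolve it to the
            # method of the same name and invoke it.
            command = sys.argv[1] if len(sys.argv) > 1 else "status"
            method = getattr(self, command, None)
            if method is None:
                raise SystemExit("unknown command: %s" % command)
            method()

    class DemoClient(Script):
        def install(self):
            print "installing"  # Python 2 print, as in the scripts above

        def status(self):
            raise SystemExit("client components have no status")

    if __name__ == "__main__":
        DemoClient().execute()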

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_metastore.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_metastore.py
new file mode 100644
index 0000000..dc02a7d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_metastore.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hive import hive
+from hive_service import hive_service
+from mysql_service import mysql_service
+
+class HiveMetastore(Script):
+
+  def install(self, env):
+    import params
+    self.install_packages(env, exclude_packages=params.hive_exclude_packages)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hive(name='metastore')
+
+  def start(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    hive_service( 'metastore',
+                   action = 'start'
+    )
+
+  def stop(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+
+    hive_service( 'metastore',
+                   action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
+    # Verify that the process referenced by the metastore pid file is running
+    check_process_status(pid_file)
+
+if __name__ == "__main__":
+  HiveMetastore().execute()
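
The status() method above reduces to a pid-file liveness probe. A stdlib sketch of what a check like check_process_status() amounts to (an assumption about its behavior, not the library code):

    import os

    def process_is_running(pid_file):
        if not os.path.isfile(pid_file):
            return False
        with open(pid_file) as f:
            pid = int(f.read().strip())
        try:
            os.kill(pid, 0)  # signal 0 checks existence, delivers nothing
            return True
        except OSError:      # usually "no such process"
            return False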

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py
new file mode 100644
index 0000000..fa8ece4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hive import hive
+from hive_service import hive_service
+from resource_management.libraries.functions.dynamic_variable_interpretation import copy_tarballs_to_hdfs
+from install_jars import install_tez_jars
+
+class HiveServer(Script):
+
+  def install(self, env):
+    import params
+    self.install_packages(env, exclude_packages=params.hive_exclude_packages)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    if not (params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >=0):
+      install_tez_jars()
+    hive(name='hiveserver2')
+
+  def start(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+
+    # This function is needed in HDP 2.2, but it is safe to call in earlier versions.
+    copy_tarballs_to_hdfs('mapreduce', params.tez_user, params.hdfs_user, params.user_group)
+    copy_tarballs_to_hdfs('tez', params.tez_user, params.hdfs_user, params.user_group)
+
+    hive_service( 'hiveserver2',
+                  action = 'start'
+    )
+
+  def stop(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+
+    hive_service( 'hiveserver2',
+                  action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{hive_pid_dir}/{hive_pid}")
+    # Verify that the process referenced by the HiveServer2 pid file is running
+    check_process_status(pid_file)
+
+
+if __name__ == "__main__":
+  HiveServer().execute()
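
The HDP 2.2 gates in configure() and start() lean on compare_versions(). For plain numeric versions the comparison reduces to tuple ordering; a minimal sketch under that assumption:

    def compare_versions(a, b):
        # "2.2.0.0" -> (2, 2, 0, 0); tuples compare lexicographically.
        ta = tuple(int(x) for x in a.split("."))
        tb = tuple(int(x) for x in b.split("."))
        return (ta > tb) - (ta < tb)  # -1, 0 or 1, like cmp()

    assert compare_versions("2.2.0.0", "2.2") >= 0  # HDP 2.2: skip Tez jars
    assert compare_versions("2.0.6", "2.2") < 0     # older: install_tez_jars()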

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_service.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_service.py
new file mode 100644
index 0000000..8e5d878
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_service.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import sys
+import time
+from resource_management.core import shell
+
+def hive_service(
+    name,
+    action='start'):
+
+  import params
+
+  if name == 'metastore':
+    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
+    cmd = format(
+      "env HADOOP_HOME={hadoop_home} JAVA_HOME={java64_home} {start_metastore_path} {hive_log_dir}/hive.out {hive_log_dir}/hive.log {pid_file} {hive_server_conf_dir} {hive_log_dir}")
+  elif name == 'hiveserver2':
+    pid_file = format("{hive_pid_dir}/{hive_pid}")
+    cmd = format(
+      "env JAVA_HOME={java64_home} {start_hiveserver2_path} {hive_log_dir}/hive-server2.out {hive_log_dir}/hive-server2.log {pid_file} {hive_server_conf_dir} {hive_log_dir}")
+
+  process_id_exists = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
+  
+  if action == 'start':
+    if name == 'hiveserver2':
+      check_fs_root()
+
+    demon_cmd = format("{cmd}")
+    
+    Execute(demon_cmd,
+            user=params.hive_user,
+            environment={'HADOOP_HOME': params.hadoop_home},
+            path=params.execute_path,
+            not_if=process_id_exists
+    )
+
+    if params.hive_jdbc_driver == "com.mysql.jdbc.Driver" or \
+       params.hive_jdbc_driver == "org.postgresql.Driver" or \
+       params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+      
+      db_connection_check_command = format(
+        "{java64_home}/bin/java -cp {check_db_connection_jar}:/usr/share/java/{jdbc_jar_name} org.apache.ambari.server.DBConnectionVerification '{hive_jdbc_connection_url}' {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_jdbc_driver}")
+      
+      Execute(db_connection_check_command,
+              path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin', tries=5, try_sleep=10)
+      
+    # AMBARI-5800 - wait for the server to come up instead of relying on PID existence alone
+    if name == 'hiveserver2':
+      SOCKET_WAIT_SECONDS = 120
+      address=params.hive_server_host
+      port=int(params.hive_server_port)
+      
+      start_time = time.time()
+      end_time = start_time + SOCKET_WAIT_SECONDS
+
+      is_service_socket_valid = False
+      print "Waiting for the Hive server to start..."
+      if params.security_enabled:
+        kinitcmd=format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser}; ")
+      else:
+        kinitcmd=None
+      while time.time() < end_time:
+        try:
+          check_thrift_port_sasl(address, port, params.hive_server2_authentication,
+                                 params.hive_server_principal, kinitcmd, params.smokeuser)
+          is_service_socket_valid = True
+          break
+        except Exception:
+          time.sleep(5)
+
+      elapsed_time = time.time() - start_time
+      
+      if not is_service_socket_valid:
+        raise Fail("Connection to Hive server %s on port %s failed after %d seconds" % (address, port, elapsed_time))
+      
+      print "Successfully connected to Hive at %s on port %s after %d seconds" % (address, port, elapsed_time)    
+            
+  elif action == 'stop':
+    demon_cmd = format("sudo kill `cat {pid_file}`")
+    Execute(demon_cmd,
+         not_if = format("! ({process_id_exists})")
+    )
+    File(pid_file,
+         action = "delete",
+    )
+
+def check_fs_root():
+  import params  
+  fs_root_url = format("{fs_root}{hive_apps_whs_dir}")
+  cmd = format("metatool -listFSRoot 2>/dev/null | grep hdfs:// | grep -v '.db$'")
+  code, out = shell.call(cmd, user=params.hive_user)
+  if code == 0 and fs_root_url.strip() != out.strip():
+    cmd = format("metatool -updateLocation {fs_root}{hive_apps_whs_dir} {out}")
+    Execute(cmd,
+            environment= {'PATH' : params.execute_path },
+            user=params.hive_user)
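
The AMBARI-5800 loop above keeps probing until HiveServer2 answers or 120 seconds pass. check_thrift_port_sasl() does a Thrift/SASL handshake; the stdlib sketch below approximates the same wait with a plain TCP connect:

    import socket
    import time

    def wait_for_port(host, port, timeout_seconds=120, poll_interval=5):
        deadline = time.time() + timeout_seconds
        while time.time() < deadline:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                s.settimeout(poll_interval)
                s.connect((host, port))
                return True  # something is accepting connections
            except socket.error:
                time.sleep(poll_interval)
            finally:
                s.close()
        return False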

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/install_jars.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/install_jars.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/install_jars.py
new file mode 100644
index 0000000..a18ca72
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/install_jars.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import os
+
+def install_tez_jars():
+  import params
+
+  destination_hdfs_dirs = get_tez_hdfs_dir_paths(params.tez_lib_uris)
+
+  # If tez libraries are to be stored in hdfs
+  if destination_hdfs_dirs:
+    for hdfs_dir in destination_hdfs_dirs:
+      params.HdfsDirectory(hdfs_dir,
+                           action="create_delayed",
+                           owner=params.tez_user,
+                           mode=0755
+      )
+    params.HdfsDirectory(None, action="create")
+
+    if params.security_enabled:
+      kinit_if_needed = format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name};")
+    else:
+      kinit_if_needed = ""
+
+    if kinit_if_needed:
+      Execute(kinit_if_needed,
+              user=params.tez_user,
+              path='/bin'
+      )
+
+    app_dir_path = None
+    lib_dir_path = None
+
+    for path in destination_hdfs_dirs:
+      if 'lib' in path:
+        lib_dir_path = path
+      else:
+        app_dir_path = path
+
+    if app_dir_path:
+      for scr_file, dest_file in params.app_dir_files.iteritems():
+        CopyFromLocal(scr_file,
+                      mode=0755,
+                      owner=params.tez_user,
+                      dest_dir=app_dir_path,
+                      dest_file=dest_file,
+                      kinnit_if_needed=kinit_if_needed,
+                      hdfs_user=params.hdfs_user,
+                      hadoop_bin_dir=params.hadoop_bin_dir,
+                      hadoop_conf_dir=params.hadoop_conf_dir
+        )
+
+    if lib_dir_path:
+      CopyFromLocal(params.tez_local_lib_jars,
+                    mode=0755,
+                    owner=params.tez_user,
+                    dest_dir=lib_dir_path,
+                    kinnit_if_needed=kinit_if_needed,
+                    hdfs_user=params.hdfs_user,
+                    hadoop_bin_dir=params.hadoop_bin_dir,
+                    hadoop_conf_dir=params.hadoop_conf_dir
+      )
+
+
+def get_tez_hdfs_dir_paths(tez_lib_uris = None):
+  hdfs_path_prefix = 'hdfs://'
+  lib_dir_paths = []
+  if tez_lib_uris and tez_lib_uris.strip().find(hdfs_path_prefix, 0) != -1:
+    dir_paths = tez_lib_uris.split(',')
+    for path in dir_paths:
+      if not "tez.tar.gz" in path:
+        lib_dir_path = path.replace(hdfs_path_prefix, '')
+        lib_dir_path = lib_dir_path if lib_dir_path.endswith(os.sep) else lib_dir_path + os.sep
+        lib_dir_paths.append(lib_dir_path)
+      else:
+        lib_dir_path = path.replace(hdfs_path_prefix, '')
+        lib_dir_paths.append(os.path.dirname(lib_dir_path))
+
+  return lib_dir_paths
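
For reference, get_tez_hdfs_dir_paths() behaves as follows on a made-up tez.lib.uris value (the namenode address is illustrative):

    uris = "hdfs://nn:8020/apps/tez/,hdfs://nn:8020/apps/tez/lib/tez.tar.gz"
    print get_tez_hdfs_dir_paths(uris)
    # -> ['nn:8020/apps/tez/', 'nn:8020/apps/tez/lib']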

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_server.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_server.py
new file mode 100644
index 0000000..91a699b
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_server.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from mysql_service import mysql_service
+
+
+class MysqlServer(Script):
+  def install(self, env):
+    import params
+    self.install_packages(env, exclude_packages=params.hive_exclude_packages)
+    self.configure(env)
+
+  def clean(self, env):
+    import params, mysql_users
+    env.set_params(params)
+    mysql_users.mysql_deluser(params)
+
+  def configure(self, env):
+    import params, mysql_users
+    env.set_params(params)
+    mysql_users.mysql_adduser(params)
+
+  def start(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+    mysql_service(daemon_name=params.daemon_name, action='start')
+
+  def stop(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+    mysql_service(daemon_name=params.daemon_name, action='stop')
+
+  def status(self, env):
+    import status_params
+
+    mysql_service(daemon_name=status_params.daemon_name, action='status')
+
+
+if __name__ == "__main__":
+  MysqlServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_service.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_service.py
new file mode 100644
index 0000000..2f0c6f6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_service.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def mysql_service(daemon_name=None, action='start'): 
+  status_cmd = format('service {daemon_name} status | grep running')
+  cmd = ('service', daemon_name, action)
+
+  if action == 'status':
+    Execute(status_cmd)
+  elif action == 'stop':
+    Execute(cmd,
+            logoutput = True,
+            only_if = status_cmd,
+            sudo = True,
+    )
+  elif action == 'start':
+    import params
+    # Hive needs MySQL to accept remote connections, so bind to all interfaces
+    replace_bind_address = ('sed','-i','s|^bind-address[ \t]*=.*|bind-address = 0.0.0.0|',params.mysql_configname)
+    Execute(replace_bind_address,
+            sudo = True,
+    )
+    
+    Execute(cmd,
+      logoutput = True,
+      not_if = status_cmd,
+      sudo = True,
+    )
+
+
+
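
The start path above rewrites bind-address in my.cnf with sed so MySQL accepts connections from other hosts. The same substitution in pure Python, for illustration (the config path is whatever params.mysql_configname resolves to):

    import re

    def open_bind_address(config_path):
        with open(config_path) as f:
            text = f.read()
        # Equivalent to: sed -i 's|^bind-address[ \t]*=.*|bind-address = 0.0.0.0|'
        text = re.sub(r"(?m)^bind-address[ \t]*=.*$",
                      "bind-address = 0.0.0.0", text)
        with open(config_path, "w") as f:
            f.write(text)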

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_users.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_users.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_users.py
new file mode 100644
index 0000000..9961c18
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/mysql_users.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+# Used to add hive access to the needed components
+def mysql_adduser(params):
+  File(params.mysql_adduser_path,
+       mode=0755,
+       content=StaticFile('addMysqlUser.sh')
+  )
+  hive_server_host = format("{hive_server_host}")
+  hive_metastore_host = format("{hive_metastore_host}")
+
+  add_metastore_cmd = "bash -x {mysql_adduser_path} {daemon_name} {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_metastore_host}"
+  add_hiveserver_cmd = "bash -x {mysql_adduser_path} {daemon_name} {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_server_host}"
+  if (hive_server_host == hive_metastore_host):
+    cmd = format(add_hiveserver_cmd)
+  else:
+    cmd = format(add_hiveserver_cmd + ";" + add_metastore_cmd)
+  Execute(cmd,
+          tries=3,
+          try_sleep=5,
+          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+  )
+
+# Removes hive access from components
+def mysql_deluser(params):
+  File(params.mysql_deluser_path,
+       mode=0755,
+       content=StaticFile('removeMysqlUser.sh')
+  )
+  hive_server_host = format("{hive_server_host}")
+  hive_metastore_host = format("{hive_metastore_host}")
+
+  del_hiveserver_cmd = "bash -x {mysql_deluser_path} {daemon_name} {hive_metastore_user_name} {hive_server_host}"
+  del_metastore_cmd = "bash -x {mysql_deluser_path} {daemon_name} {hive_metastore_user_name} {hive_metastore_host}"
+  if (hive_server_host == hive_metastore_host):
+    cmd = format(del_hiveserver_cmd)
+  else:
+    cmd = format(
+      del_hiveserver_cmd + ";" + del_metastore_cmd)
+  Execute(cmd,
+          tries=3,
+          try_sleep=5,
+          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+  )
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params.py
new file mode 100644
index 0000000..bb7f1f4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params.py
@@ -0,0 +1,282 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management import *
+import status_params
+import os
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+# This is expected to be of the form #.#.#.#
+hdp_stack_version = str(config['hostLevelParams']['stack_version'])
+hdp_stack_version = format_hdp_stack_version(hdp_stack_version)
+stack_is_hdp21 = hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.1') >= 0 and compare_versions(hdp_stack_version, '2.2') < 0
+
+# Hadoop params
+# TODO, this logic should initialize these parameters in a file inside the HDP 2.2 stack.
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >=0:
+  hadoop_bin_dir = "/usr/hdp/current/hadoop-client/bin"
+  hadoop_home = '/usr/hdp/current/hadoop-client'
+  hive_bin = '/usr/hdp/current/hive-client/bin'
+  hive_lib = '/usr/hdp/current/hive-client/lib'
+
+  hcat_lib = '/usr/hdp/current/hive-webhcat/share/hcatalog'
+  webhcat_bin_dir = '/usr/hdp/current/hive-webhcat/sbin'
+  
+  hive_specific_configs_supported = True
+else:
+  hadoop_bin_dir = "/usr/bin"
+  hadoop_home = '/usr'
+  hadoop_streaming_jars = '/usr/lib/hadoop-mapreduce/hadoop-streaming-*.jar'
+  hive_bin = '/usr/lib/hive/bin'
+  hive_lib = '/usr/lib/hive/lib/'
+  pig_tar_file = '/usr/share/HDP-webhcat/pig.tar.gz'
+  hive_tar_file = '/usr/share/HDP-webhcat/hive.tar.gz'
+  sqoop_tar_file = '/usr/share/HDP-webhcat/sqoop*.tar.gz'
+
+  if hdp_stack_version != "" and compare_versions(hdp_stack_version, "2.1.0.0") < 0:
+    hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
+    webhcat_bin_dir = '/usr/lib/hcatalog/sbin'
+  # for newer versions
+  else:
+    hcat_lib = '/usr/lib/hive-hcatalog/share/hcatalog'
+    webhcat_bin_dir = '/usr/lib/hive-hcatalog/sbin'
+    
+  hive_specific_configs_supported = False
+
+hadoop_conf_dir = "/etc/hadoop/conf"
+hive_conf_dir_prefix = "/etc/hive"
+hive_conf_dir = format("{hive_conf_dir_prefix}/conf")
+hive_client_conf_dir = format("{hive_conf_dir_prefix}/conf")
+hive_server_conf_dir = format("{hive_conf_dir_prefix}/conf.server")
+
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, "2.1.0.0") < 0:
+  hcat_conf_dir = '/etc/hcatalog/conf'
+  config_dir = '/etc/hcatalog/conf'
+# for newer versions
+else:
+  hcat_conf_dir = '/etc/hive-hcatalog/conf'
+  config_dir = '/etc/hive-webhcat/conf'
+
+execute_path = os.environ['PATH'] + os.pathsep + hive_bin + os.pathsep + hadoop_bin_dir
+hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
+hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
+
+hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
+hive_metastore_db_type = config['configurations']['hive-env']['hive_database_type']
+
+#users
+hive_user = config['configurations']['hive-env']['hive_user']
+#JDBC driver jar name
+hive_jdbc_driver = config['configurations']['hive-site']['javax.jdo.option.ConnectionDriverName']
+if hive_jdbc_driver == "com.mysql.jdbc.Driver":
+  jdbc_jar_name = "mysql-connector-java.jar"
+  jdbc_symlink_name = "mysql-jdbc-driver.jar"
+elif hive_jdbc_driver == "org.postgresql.Driver":
+  jdbc_jar_name = "postgresql-jdbc.jar"
+  jdbc_symlink_name = "postgres-jdbc-driver.jar"
+elif hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+  jdbc_jar_name = "ojdbc6.jar"
+  jdbc_symlink_name = "oracle-jdbc-driver.jar"
+
+check_db_connection_jar_name = "DBConnectionVerification.jar"
+check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
+
+#common
+hive_metastore_host = config['clusterHostInfo']['hive_metastore_host'][0]
+hive_metastore_port = get_port_from_url(config['configurations']['hive-site']['hive.metastore.uris']) # typically 9083
+hive_var_lib = '/var/lib/hive'
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+hive_server_host = config['clusterHostInfo']['hive_server_host'][0]
+hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
+hive_url = format("jdbc:hive2://{hive_server_host}:{hive_server_port}")
+hive_server_principal = config['configurations']['hive-site']['hive.server2.authentication.kerberos.principal']
+hive_server2_authentication = config['configurations']['hive-site']['hive.server2.authentication']
+
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smoke_test_sql = format("{tmp_dir}/hiveserver2.sql")
+smoke_test_path = format("{tmp_dir}/hiveserver2Smoke.sh")
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+
+fs_root = config['configurations']['core-site']['fs.defaultFS']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+hive_metastore_keytab_path =  config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
+
+#hive_env
+hive_dbroot = config['configurations']['hive-env']['hive_dbroot']
+hive_log_dir = config['configurations']['hive-env']['hive_log_dir']
+hive_pid_dir = status_params.hive_pid_dir
+hive_pid = status_params.hive_pid
+#Default conf dir for client
+hive_conf_dirs_list = [hive_server_conf_dir, hive_client_conf_dir]
+
+if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE"]:
+  hive_config_dir = hive_server_conf_dir
+else:
+  hive_config_dir = hive_client_conf_dir
+
+#hive-site
+hive_database_name = config['configurations']['hive-env']['hive_database_name']
+
+#Starting hiveserver2
+start_hiveserver2_script = 'startHiveserver2.sh.j2'
+
+##Starting metastore
+start_metastore_script = 'startMetastore.sh'
+hive_metastore_pid = status_params.hive_metastore_pid
+java_share_dir = '/usr/share/java'
+driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
+
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+user_group = config['configurations']['cluster-env']['user_group']
+artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
+
+target = format("{hive_lib}/{jdbc_jar_name}")
+
+jdk_location = config['hostLevelParams']['jdk_location']
+driver_curl_source = format("{jdk_location}/{jdbc_symlink_name}")
+
+start_hiveserver2_path = format("{tmp_dir}/start_hiveserver2_script")
+start_metastore_path = format("{tmp_dir}/start_metastore_script")
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+hive_heapsize = config['configurations']['hive-site']['hive.heapsize']
+java64_home = config['hostLevelParams']['java_home']
+
+##### MYSQL
+
+db_name = config['configurations']['hive-env']['hive_database_name']
+mysql_group = 'mysql'
+mysql_host = config['clusterHostInfo']['hive_mysql_host']
+
+mysql_adduser_path = format("{tmp_dir}/addMysqlUser.sh")
+mysql_deluser_path = format("{tmp_dir}/removeMysqlUser.sh")
+
+######## Metastore Schema
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, "2.1.0.0") < 0:
+  init_metastore_schema = False
+else:
+  init_metastore_schema = True
+
+########## HCAT
+
+hcat_dbroot = hcat_lib
+
+hcat_user = config['configurations']['hive-env']['hcat_user']
+webhcat_user = config['configurations']['hive-env']['webhcat_user']
+
+hcat_pid_dir = status_params.hcat_pid_dir
+hcat_log_dir = config['configurations']['hive-env']['hcat_log_dir']
+hcat_env_sh_template = config['configurations']['hcat-env']['content']
+
+#hive-log4j.properties.template
+if (('hive-log4j' in config['configurations']) and ('content' in config['configurations']['hive-log4j'])):
+  log4j_props = config['configurations']['hive-log4j']['content']
+else:
+  log4j_props = None
+
+#hive-exec-log4j.properties.template
+if (('hive-exec-log4j' in config['configurations']) and ('content' in config['configurations']['hive-exec-log4j'])):
+  log4j_exec_props = config['configurations']['hive-exec-log4j']['content']
+else:
+  log4j_exec_props = None
+
+daemon_name = status_params.daemon_name
+hive_env_sh_template = config['configurations']['hive-env']['content']
+
+hive_hdfs_user_dir = format("/user/{hive_user}")
+hive_hdfs_user_mode = 0700
+hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"]
+#for create_hdfs_directory
+hostname = config["hostname"]
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+
+# Tez-related properties
+tez_user = config['configurations']['tez-env']['tez_user']
+
+# Tez jars
+tez_local_api_jars = '/usr/lib/tez/tez*.jar'
+tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
+app_dir_files = {tez_local_api_jars:None}
+
+# Tez libraries
+tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
+
+if System.get_instance().os_family == "ubuntu":
+  mysql_configname = '/etc/mysql/my.cnf'
+else:
+  mysql_configname = '/etc/my.cnf'
+  
+mysql_user = 'mysql'
+
+# Hive security
+hive_authorization_enabled = config['configurations']['hive-site']['hive.security.authorization.enabled']
+
+mysql_jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
+
+# Other packages (such as libmysql-java) also provide /usr/share/java/mysql-connector-java.jar;
+# installing mysql-connector-java on top of them can cause package conflicts.
+if os.path.exists(mysql_jdbc_driver_jar):
+  hive_exclude_packages = ['mysql-connector-java']
+else:  
+  hive_exclude_packages = []
+
+########################################################
+########### WebHCat related params #####################
+########################################################
+
+webhcat_env_sh_template = config['configurations']['webhcat-env']['content']
+templeton_log_dir = config['configurations']['hive-env']['hcat_log_dir']
+templeton_pid_dir = status_params.hcat_pid_dir
+
+webhcat_pid_file = status_params.webhcat_pid_file
+
+templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
+
+
+webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
+
+webhcat_apps_dir = "/apps/webhcat"
+
+hcat_hdfs_user_dir = format("/user/{hcat_user}")
+hcat_hdfs_user_mode = 0755
+webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
+webhcat_hdfs_user_mode = 0755
+#for create_hdfs_directory
+security_param = "true" if security_enabled else "false"
+
+import functools
+#create partial functions with common arguments for every HdfsDirectory call
+#to create hdfs directory we need to call params.HdfsDirectory in code
+HdfsDirectory = functools.partial(
+  HdfsDirectory,
+  conf_dir = hadoop_conf_dir,
+  hdfs_user = hdfs_principal_name if security_enabled else hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  bin_dir = hadoop_bin_dir
+)
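
The functools.partial construction that closes params.py keeps every later HdfsDirectory call short by binding the arguments that never vary between calls. A self-contained sketch of the same idiom, with illustrative names:

    import functools

    def make_dir(path, owner=None, conf_dir=None, security_enabled=False):
        print "mkdir %s owner=%s conf=%s secure=%s" % (
            path, owner, conf_dir, security_enabled)

    MakeDir = functools.partial(make_dir,
                                conf_dir="/etc/hadoop/conf",
                                security_enabled=True)

    MakeDir("/apps/webhcat", owner="hcat")  # shared kwargs come pre-bound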

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
new file mode 100644
index 0000000..945a676
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import socket
+import sys
+
+from hcat_service_check import hcat_service_check
+from webhcat_service_check import webhcat_service_check
+
+class HiveServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    address=format("{hive_server_host}")
+    port=int(format("{hive_server_port}"))
+    print "Test connectivity to hive server"
+    if params.security_enabled:
+      kinitcmd=format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser}; ")
+    else:
+      kinitcmd=None
+
+    try:
+      check_thrift_port_sasl(address, port, params.hive_server2_authentication,
+                             params.hive_server_principal, kinitcmd, params.smokeuser)
+      print "Successfully connected to %s on port %s" % (address, port)
+    except Exception:
+      print "Connection to %s on port %s failed" % (address, port)
+      exit(1)
+
+    hcat_service_check()
+    webhcat_service_check()
+
+if __name__ == "__main__":
+  HiveServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/status_params.py
new file mode 100644
index 0000000..e6f2514
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/status_params.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+hive_pid_dir = config['configurations']['hive-env']['hive_pid_dir']
+hive_pid = 'hive-server.pid'
+
+hive_metastore_pid = 'hive.pid'
+
+hcat_pid_dir = config['configurations']['hive-env']['hcat_pid_dir']
+webhcat_pid_file = format('{hcat_pid_dir}/webhcat.pid')
+
+if System.get_instance().os_family == "suse" or System.get_instance().os_family == "ubuntu":
+  daemon_name = 'mysql'
+else:
+  daemon_name = 'mysqld'
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat.py
new file mode 100644
index 0000000..c02bf74
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat.py
@@ -0,0 +1,143 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import sys
+import os.path
+import glob
+
+from resource_management import *
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.libraries.functions.dynamic_variable_interpretation import copy_tarballs_to_hdfs
+
+
+def webhcat():
+  import params
+
+  if params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, "2.2.0.0") < 0:
+    params.HdfsDirectory(params.webhcat_apps_dir,
+                         action="create_delayed",
+                         owner=params.webhcat_user,
+                         mode=0755
+    )
+  
+  if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
+    params.HdfsDirectory(params.hcat_hdfs_user_dir,
+                         action="create_delayed",
+                         owner=params.hcat_user,
+                         mode=params.hcat_hdfs_user_mode
+    )
+  params.HdfsDirectory(params.webhcat_hdfs_user_dir,
+                       action="create_delayed",
+                       owner=params.webhcat_user,
+                       mode=params.webhcat_hdfs_user_mode
+  )
+  params.HdfsDirectory(None, action="create")
+
+  Directory(params.templeton_pid_dir,
+            owner=params.webhcat_user,
+            mode=0755,
+            group=params.user_group,
+            recursive=True)
+
+  Directory(params.templeton_log_dir,
+            owner=params.webhcat_user,
+            mode=0755,
+            group=params.user_group,
+            recursive=True)
+
+  Directory(params.config_dir,
+            recursive=True,
+            owner=params.webhcat_user,
+            group=params.user_group)
+
+  if params.security_enabled:
+    kinit_if_needed = format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name};")
+  else:
+    kinit_if_needed = ""
+
+  if kinit_if_needed:
+    Execute(kinit_if_needed,
+            user=params.webhcat_user,
+            path='/bin'
+    )
+
+  # TODO, these checks that are specific to HDP 2.2 and greater should really be in a script specific to that stack.
+  if params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, "2.2.0.0") >= 0:
+    copy_tarballs_to_hdfs('hive', params.webhcat_user, params.hdfs_user, params.user_group)
+    copy_tarballs_to_hdfs('pig', params.webhcat_user, params.hdfs_user, params.user_group)
+    copy_tarballs_to_hdfs('hadoop-streaming', params.webhcat_user, params.hdfs_user, params.user_group)
+    copy_tarballs_to_hdfs('sqoop', params.webhcat_user, params.hdfs_user, params.user_group)
+  else:
+    CopyFromLocal(params.hadoop_streaming_jars,
+                  owner=params.webhcat_user,
+                  mode=0755,
+                  dest_dir=params.webhcat_apps_dir,
+                  kinnit_if_needed=kinit_if_needed,
+                  hdfs_user=params.hdfs_user,
+                  hadoop_bin_dir=params.hadoop_bin_dir,
+                  hadoop_conf_dir=params.hadoop_conf_dir
+    )
+
+    if (os.path.isfile(params.pig_tar_file)):
+      CopyFromLocal(params.pig_tar_file,
+                    owner=params.webhcat_user,
+                    mode=0755,
+                    dest_dir=params.webhcat_apps_dir,
+                    kinnit_if_needed=kinit_if_needed,
+                    hdfs_user=params.hdfs_user,
+                    hadoop_bin_dir=params.hadoop_bin_dir,
+                    hadoop_conf_dir=params.hadoop_conf_dir
+      )
+
+    CopyFromLocal(params.hive_tar_file,
+                  owner=params.webhcat_user,
+                  mode=0755,
+                  dest_dir=params.webhcat_apps_dir,
+                  kinnit_if_needed=kinit_if_needed,
+                  hdfs_user=params.hdfs_user,
+                  hadoop_bin_dir=params.hadoop_bin_dir,
+                  hadoop_conf_dir=params.hadoop_conf_dir
+    )
+
+    if (len(glob.glob(params.sqoop_tar_file)) > 0):
+      CopyFromLocal(params.sqoop_tar_file,
+                    owner=params.webhcat_user,
+                    mode=0755,
+                    dest_dir=params.webhcat_apps_dir,
+                    kinnit_if_needed=kinit_if_needed,
+                    hdfs_user=params.hdfs_user,
+                    hadoop_bin_dir=params.hadoop_bin_dir,
+                    hadoop_conf_dir=params.hadoop_conf_dir
+      )
+
+  XmlConfig("webhcat-site.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['webhcat-site'],
+            configuration_attributes=params.config['configuration_attributes']['webhcat-site'],
+            owner=params.webhcat_user,
+            group=params.user_group,
+            )
+
+  File(format("{config_dir}/webhcat-env.sh"),
+       owner=params.webhcat_user,
+       group=params.user_group,
+       content=InlineTemplate(params.webhcat_env_sh_template)
+  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_server.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_server.py
new file mode 100644
index 0000000..a8b3a8f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_server.py
@@ -0,0 +1,53 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import sys
+from resource_management import *
+
+from webhcat import webhcat
+from webhcat_service import webhcat_service
+
+class WebHCatServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    webhcat()
+
+  def start(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    webhcat_service(action = 'start')
+
+  def stop(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+
+    webhcat_service(action = 'stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.webhcat_pid_file)
+
+if __name__ == "__main__":
+  WebHCatServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_service.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_service.py
new file mode 100644
index 0000000..25a60e8
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_service.py
@@ -0,0 +1,42 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management import *
+
+def webhcat_service(action='start'):
+  import params
+
+  cmd = format('env HADOOP_HOME={hadoop_home} {webhcat_bin_dir}/webhcat_server.sh')
+
+  if action == 'start':
+    daemon_cmd = format('{cmd} start')
+    no_op_test = format('ls {webhcat_pid_file} >/dev/null 2>&1 && ps -p `cat {webhcat_pid_file}` >/dev/null 2>&1')
+    Execute(daemon_cmd,
+            user=params.webhcat_user,
+            not_if=no_op_test
+    )
+  elif action == 'stop':
+    daemon_cmd = format('{cmd} stop')
+    Execute(daemon_cmd,
+            user=params.webhcat_user
+    )
+    File(params.webhcat_pid_file,
+         action="delete",
+    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_service_check.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_service_check.py
new file mode 100644
index 0000000..8d15e47
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_service_check.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+def webhcat_service_check():
+  import params
+  File(format("{tmp_dir}/templetonSmoke.sh"),
+       content= StaticFile('templetonSmoke.sh'),
+       mode=0755
+  )
+
+  cmd = format("{tmp_dir}/templetonSmoke.sh {webhcat_server_host[0]} {smokeuser} {smokeuser_keytab}"
+               " {security_param} {kinit_path_local}",
+               smokeuser_keytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
+
+  Execute(cmd,
+          tries=3,
+          try_sleep=5,
+          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+          logoutput=True)
+
+
+


[06/37] ambari git commit: AMBARI-8695: Common Services: Refactor HDP-2.0.6 HDFS, ZOOKEEPER services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metrics.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metrics.json
deleted file mode 100644
index 088626a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metrics.json
+++ /dev/null
@@ -1,7840 +0,0 @@
-{
-  "NAMENODE": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "metrics/dfs/FSNamesystem/TotalLoad": {
-            "metric": "dfs.FSNamesystem.TotalLoad",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityTotal": {
-            "metric": "dfs.FSNamesystem.CapacityTotal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityUsed": {
-            "metric": "dfs.FSNamesystem.CapacityUsed",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityRemaining": {
-            "metric": "dfs.FSNamesystem.CapacityRemaining",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
-            "metric": "dfs.FSNamesystem.CapacityUsedNonDFS",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/BlockCapacity": {
-            "metric": "dfs.FSNamesystem.BlockCapacity",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/GetListingOps": {
-            "metric": "dfs.namenode.GetListingOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesAppended": {
-            "metric": "dfs.namenode.FilesAppended",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/fsync_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.FsyncAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/load/load_one": {
-            "metric": "load_one",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/renewLease_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RenewLeaseNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getFileInfo_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetFileInfoAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/complete_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.CompleteAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setPermission_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SetPermissionNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
-            "metric": "dfs.FSNamesystem.CapacityTotalGB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setOwner_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SetOwnerNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getBlockLocations_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_run": {
-            "metric": "proc_run",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
-            "metric": "dfs.FSNamesystem.CapacityUsedGB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/AddBlockOps": {
-            "metric": "dfs.namenode.AddBlockOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/swap_total": {
-            "metric": "swap_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesDeleted": {
-            "metric": "dfs.namenode.FilesDeleted",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Syncs_avg_time": {
-            "metric": "dfs.namenode.SyncsAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "jvm.JvmMetrics.ThreadsBlocked",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "rpc.rpc.RpcQueueTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_total": {
-            "metric": "proc_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/blockReport_avg_time": {
-            "metric": "dfs.namenode.BlockReportAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/part_max_used": {
-            "metric": "part_max_used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getFileInfo_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetFileInfoNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getEditLogSize_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReceived_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_idle": {
-            "metric": "cpu_idle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/versionRequest_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.VersionRequestAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_aidle": {
-            "metric": "cpu_aidle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_free": {
-            "metric": "mem_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/versionRequest_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.VersionRequestNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/addBlock_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.AddBlockNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesCreated": {
-            "metric": "dfs.namenode.FilesCreated",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rename_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RenameAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_in": {
-            "metric": "bytes_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setSafeMode_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SetSafeModeNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_out": {
-            "metric": "pkts_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_cached": {
-            "metric": "mem_cached",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/disk_total": {
-            "metric": "disk_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setPermission_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SetPermissionAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesRenamed": {
-            "metric": "dfs.namenode.FilesRenamed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/register_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setReplication_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.setReplication_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/JournalTransactionsBatchedInSync": {
-            "metric": "dfs.namenode.JournalTransactionsBatchedInSync",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "ugi.UgiMetrics.LoginFailureNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/GetBlockLocations": {
-            "metric": "dfs.namenode.GetBlockLocations",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/fsync_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.FsyncNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_wio": {
-            "metric": "cpu_wio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/create_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.CreateAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
-            "metric": "dfs.FSNamesystem.PendingReplicationBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_speed": {
-            "metric": "cpu_speed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/delete_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.DeleteAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FileInfoOps": {
-            "metric": "dfs.namenode.FileInfoOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/sendHeartbeat_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/DeleteFileOps": {
-            "metric": "dfs.namenode.DeleteFileOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReport_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReportNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setSafeMode_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SetSafeModeAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationSuccesses": {
-            "metric": "rpc.rpc.RpcAuthenticationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
-            "metric": "dfs.FSNamesystem.PendingDeletionBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationFailures": {
-            "metric": "rpc.rpc.RpcAuthenticationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/pkts_in": {
-            "metric": "pkts_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_total": {
-            "metric": "mem_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getEditLogSize_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesInGetListingOps": {
-            "metric": "dfs.namenode.FilesInGetListingOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "jvm.JvmMetrics.ThreadsRunnable",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/complete_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.CompleteNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "jvm.JvmMetrics.ThreadsNew",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollFsImage_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.rollFsImage_num_ops",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "rpc.rpc.RpcAuthorizationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Syncs_num_ops": {
-            "metric": "dfs.namenode.SyncsNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReceived_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setReplication_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.setReplication_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollEditLog_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RollEditLogAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "rpc.rpc.SentBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/FilesTotal": {
-            "metric": "dfs.FSNamesystem.FilesTotal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "jvm.JvmMetrics.LogWarn",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/ExcessBlocks": {
-            "metric": "dfs.FSNamesystem.ExcessBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "jvm.JvmMetrics.GcCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "rpc.rpc.ReceivedBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_nice": {
-            "metric": "cpu_nice",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/blockReport_num_ops": {
-            "metric": "dfs.namenode.BlockReportNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/SafemodeTime": {
-            "metric": "dfs.namenode.SafemodeTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollFsImage_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.rollFsImage_avg_time",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/mkdirs_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.MkdirsAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "rpc.rpc.NumOpenConnections",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
-            "metric": "dfs.FSNamesystem.ScheduledReplicationBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/disk_free": {
-            "metric": "disk_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/BlocksTotal": {
-            "metric": "dfs.FSNamesystem.BlocksTotal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_buffers": {
-            "metric": "mem_buffers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "jvm.JvmMetrics.GcTimeMillis",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getBlockLocations_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Transactions_num_ops": {
-            "metric": "dfs.namenode.TransactionsNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/create_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.CreateNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "jvm.JvmMetrics.ThreadsTerminated",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/bytes_out": {
-            "metric": "bytes_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_user": {
-            "metric": "cpu_user",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_free": {
-            "metric": "swap_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load/load_five": {
-            "metric": "load_five",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_system": {
-            "metric": "cpu_system",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
-            "metric": "dfs.FSNamesystem.CapacityRemainingGB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Transactions_avg_time": {
-            "metric": "dfs.namenode.TransactionsAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/boottime": {
-            "metric": "boottime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/MissingBlocks": {
-            "metric": "dfs.FSNamesystem.MissingBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "rpc.rpc.CallQueueLength",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/delete_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.DeleteNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CorruptBlocks": {
-            "metric": "dfs.FSNamesystem.CorruptBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rename_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RenameNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReport_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReportAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/mkdirs_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.MkdirsNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load/load_fifteen": {
-            "metric": "load_fifteen",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "jvm.JvmMetrics.LogInfo",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/fsImageLoadTime": {
-            "metric": "dfs.namenode.FsImageLoadTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getListing_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetListingNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blocksBeingWrittenReport_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollEditLog_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RollEditLogNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/addBlock_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.AddBlockAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blocksBeingWrittenReport_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setOwner_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SetOwnerAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_shared": {
-            "metric": "mem_shared",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
-            "metric": "dfs.FSNamesystem.UnderReplicatedBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/sendHeartbeat_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/CreateFileOps": {
-            "metric": "dfs.namenode.CreateFileOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logError": {
-            "metric": "jvm.JvmMetrics.LogError",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_num": {
-            "metric": "cpu_num",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/register_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getListing_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetListingAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "jvm.JvmMetrics.LogFatal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/renewLease_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RenewLeaseAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "metrics/dfs/namenode/Used": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/TotalLoad": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.TotalLoad",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memMaxM":{
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemMaxM",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/dfs/FSNamesystem/BlockCapacity": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlockCapacity",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/TotalFiles": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/HostName": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.HostName",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/GetListingOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.GetListingOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/UpgradeFinalized": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/fsync_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=ugi.loginSuccess_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/Safemode": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/CorruptBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CorruptBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/LiveNodes": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/renewLease_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getFileInfo_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityRemaining": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemaining",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/PercentRemaining": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/complete_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityTotalGB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getBlockLocations_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/AddBlockOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.AddBlockOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsedGB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Syncs_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.Syncs_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsBlocked",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/PercentUsed": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/DecomNodes": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/blockReport_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.blockReport_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/NonDfsUsedSpace": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/UpgradeFinalized": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getFileInfo_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getEditLogSize_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=ugi.loginSuccess_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/blockReceived_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Safemode": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/FilesCreated": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.FilesCreated",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/addBlock_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/DecomNodes": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityUsed": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsed",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/NonHeapMemoryUsed": {
-            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/DeadNodes": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/PercentUsed": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Free": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Free",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Total": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/GetBlockLocations": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.GetBlockLocations",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=ugi.loginFailure_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/fsync_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/HeapMemoryMax": {
-            "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/create_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.create_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.PendingReplicationBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/UnderReplicatedBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.UnderReplicatedBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/FileInfoOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.FileInfoOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/MissingBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.MissingBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/sendHeartbeat_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/blockReport_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/CapacityRemaining": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemState.CapacityRemaining",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthenticationSuccesses": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationSuccesses",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.PendingDeletionBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthenticationFailures": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationFailures",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getEditLogSize_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapCommittedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/FilesInGetListingOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.FilesInGetListingOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsRunnable",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/BlocksTotal": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlocksTotal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityTotal": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityTotal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/complete_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/LiveNodes": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsNew",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/rollFsImage_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationFailures",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Syncs_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.Syncs_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/StartTime": {
-            "metric": "java.lang:type=Runtime.StartTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/blockReceived_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/rollEditLog_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/DeadNodes": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.SentBytes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/HeapMemoryUsed": {
-            "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/FilesTotal": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.FilesTotal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Version": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogWarn",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/ExcessBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.ExcessBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/PercentRemaining": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.ReceivedBytes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/blockReport_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.blockReport_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/NonHeapMemoryMax": {
-            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/rollFsImage_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.NumOpenConnections",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapUsedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.ScheduledReplicationBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsWaiting",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/BlocksTotal": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlocksTotal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcTimeMillis",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getBlockLocations_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Transactions_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.Transactions_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/create_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.create_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/CapacityTotal": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTerminated",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemainingGB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Transactions_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.Transactions_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/MissingBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.MissingBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Threads": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Threads",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.callQueueLen",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CorruptBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/blockReport_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/TotalFiles": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogInfo",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/NameDirStatuses": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NameDirStatuses",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getListing_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/rollEditLog_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/addBlock_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/CapacityUsed": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/sendHeartbeat_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/CreateFileOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.CreateFileOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logError": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogError",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=ugi.loginFailure_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationSuccesses",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/Version": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getListing_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogFatal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/NonDfsUsedSpace": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/renewLease_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/TotalBlocks": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityNonDFSUsed",
-            "pointInTime": true,
-            "temporal": false
-          }
-        }
-      }
-    ],
-    "HostComponent": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "metrics/dfs/FSNamesystem/TotalLoad": {
-            "metric": "dfs.FSNamesystem.TotalLoad",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityTotal": {
-            "metric": "dfs.FSNamesystem.CapacityTotal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityUsed": {
-            "metric": "dfs.FSNamesystem.CapacityUsed",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityRemaining": {
-            "metric": "dfs.FSNamesystem.CapacityRemaining",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
-            "metric": "dfs.FSNamesystem.CapacityUsedNonDFS",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/BlockCapacity": {
-            "metric": "dfs.FSNamesystem.BlockCapacity",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/GetListingOps": {
-            "metric": "dfs.namenode.GetListingOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesAppended": {
-            "metric": "dfs.namenode.FilesAppended",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/fsync_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.FsyncAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/load/load_one": {
-            "metric": "load_one",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/renewLease_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RenewLeaseNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getFileInfo_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetFileInfoAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/complete_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.CompleteAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setPermission_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SetPermissionNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
-            "metric": "dfs.FSNamesystem.CapacityTotalGB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setOwner_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SetOwnerNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getBlockLocations_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_run": {
-            "metric": "proc_run",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
-            "metric": "dfs.FSNamesystem.CapacityUsedGB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/AddBlockOps": {
-            "metric": "dfs.namenode.AddBlockOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/swap_total": {
-            "metric": "swap_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesDeleted": {
-            "metric": "dfs.namenode.FilesDeleted",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Syncs_avg_time": {
-            "metric": "dfs.namenode.SyncsAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "jvm.JvmMetrics.ThreadsBlocked",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "rpc.rpc.RpcQueueTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_total": {
-            "metric": "proc_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/blockReport_avg_time": {
-            "metric": "dfs.namenode.BlockReportAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/part_max_used": {
-            "metric": "part_max_used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getFileInfo_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetFileInfoNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getEditLogSize_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReceived_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_idle": {
-            "metric": "cpu_idle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/versionRequest_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.VersionRequestAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_aidle": {
-            "metric": "cpu_aidle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_free": {
-            "metric": "mem_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/versionRequest_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.VersionRequestNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/addBlock_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.AddBlockNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesCreated": {
-            "metric": "dfs.namenode.FilesCreated",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rename_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RenameAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_in": {
-            "metric": "bytes_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setSafeMode_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SetSafeModeNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_out": {
-            "metric": "pkts_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_cached": {
-            "metric": "mem_cached",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/disk_total": {
-            "metric": "disk_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setPermission_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SetPermissionAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesRenamed": {
-            "metric": "dfs.namenode.FilesRenamed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/register_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setReplication_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.setReplication_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/JournalTransactionsBatchedInSync": {
-            "metric": "dfs.namenode.JournalTransactionsBatchedInSync",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "ugi.UgiMetrics.LoginFailureNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/GetBlockLocations": {
-            "metric": "dfs.namenode.GetBlockLocations",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/fsync_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.FsyncNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_wio": {
-            "metric": "cpu_wio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/create_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.CreateAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
-            "metric": "dfs.FSNamesystem.PendingReplicationBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_speed": {
-            "metric": "cpu_speed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/delete_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.DeleteAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FileInfoOps": {
-            "metric": "dfs.namenode.FileInfoOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/sendHeartbeat_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/DeleteFileOps": {
-            "metric": "dfs.namenode.DeleteFileOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReport_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReportNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setSafeMode_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SetSafeModeAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationSuccesses": {
-            "metric": "rpc.rpc.RpcAuthenticationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
-            "metric": "dfs.FSNamesystem.PendingDeletionBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationFailures": {
-            "metric": "rpc.rpc.RpcAuthenticationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/pkts_in": {
-            "metric": "pkts_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_total": {
-            "metric": "mem_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getEditLogSize_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesInGetListingOps": {
-            "metric": "dfs.namenode.FilesInGetListingOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "jvm.JvmMetrics.ThreadsRunnable",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/complete_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.CompleteNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "jvm.JvmMetrics.ThreadsNew",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollFsImage_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.rollFsImage_num_ops",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "rpc.rpc.RpcAuthorizationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Syncs_num_ops": {
-            "metric": "dfs.namenode.SyncsNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReceived_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setReplication_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.setReplication_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollEditLog_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RollEditLogAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "rpc.rpc.SentBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/FilesTotal": {
-            "metric": "dfs.FSNamesystem.FilesTotal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "jvm.JvmMetrics.LogWarn",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/ExcessBlocks": {
-            "metric": "dfs.FSNamesystem.ExcessBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "jvm.JvmMetrics.GcCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "rpc.rpc.ReceivedBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_nice": {
-            "metric": "cpu_nice",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/blockReport_num_ops": {
-            "metric": "dfs.namenode.BlockReportNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/SafemodeTime": {
-            "metric": "dfs.namenode.SafemodeTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollFsImage_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.rollFsImage_avg_time",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/mkdirs_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.MkdirsAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "rpc.rpc.NumOpenConnections",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
-            "metric": "dfs.FSNamesystem.ScheduledReplicationBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/disk_free": {
-            "metric": "disk_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/BlocksTotal": {
-            "metric": "dfs.FSNamesystem.BlocksTotal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_buffers": {
-            "metric": "mem_buffers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "jvm.JvmMetrics.GcTimeMillis",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getBlockLocations_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Transactions_num_ops": {
-            "metric": "dfs.namenode.TransactionsNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/create_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.CreateNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "jvm.JvmMetrics.ThreadsTerminated",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/bytes_out": {
-            "metric": "bytes_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_user": {
-            "metric": "cpu_user",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_free": {
-            "metric": "swap_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load/load_five": {
-            "metric": "load_five",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_system": {
-            "metric": "cpu_system",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
-            "metric": "dfs.FSNamesystem.CapacityRemainingGB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Transactions_avg_time": {
-            "metric": "dfs.namenode.TransactionsAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/boottime": {
-            "metric": "boottime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/MissingBlocks": {
-            "metric": "dfs.FSNamesystem.MissingBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "rpc.rpc.CallQueueLength",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/delete_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.DeleteNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CorruptBlocks": {
-            "metric": "dfs.FSNamesystem.CorruptBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rename_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RenameNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReport_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReportAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/mkdirs_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.MkdirsNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load/load_fifteen": {
-            "metric": "load_fifteen",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "jvm.JvmMetrics.LogInfo",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/fsImageLoadTime": {
-            "metric": "dfs.namenode.FsImageLoadTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getListing_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetListingNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blocksBeingWrittenReport_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollEditLog_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RollEditLogNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/addBlock_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.AddBlockAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blocksBeingWrittenReport_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setOwner_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SetOwnerAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_shared": {
-            "metric": "mem_shared",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
-            "metric": "dfs.FSNamesystem.UnderReplicatedBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/sendHeartbeat_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/CreateFileOps": {
-            "metric": "dfs.namenode.CreateFileOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logError": {
-            "metric": "jvm.JvmMetrics.LogError",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_num": {
-            "metric": "cpu_num",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/register_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getListing_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetListingAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "jvm.JvmMetrics.LogFatal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/renewLease_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RenewLeaseAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "metrics/dfs/namenode/Used": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/TotalLoad": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.TotalLoad",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memMaxM":{
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemMaxM",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/dfs/FSNamesystem/BlockCapacity": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlockCapacity",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/TotalFiles": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/GetListingOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.GetListingOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/HostName": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.tag.Hostname",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/runtime/StartTime": {
-            "metric": "java.lang:type=Runtime.StartTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/UpgradeFinalized": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.VersionRequestNumOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/fsync_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.FsyncAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=UgiMetrics.LoginSuccessAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/renewLease_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityRemaining": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemaining",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getFileInfo_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.GetFileInfoAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/PercentRemaining": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/complete_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.CompleteAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityTotalGB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getBlockLocations_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.GetBlockLocationsNumOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsedGB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/AddBlockOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.AddBlockOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Syncs_avg_time": {
-            "metric":

<TRUNCATED>
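
For context on the deleted metrics definitions above: each entry maps an Ambari metric path to a source metric name, with "temporal" values served from Ganglia and "pointInTime" values read over JMX. A JMX metric name such as "Hadoop:service=NameNode,name=FSNamesystem.TotalLoad" is a bean query plus a trailing attribute name. A minimal, hypothetical sketch of resolving one such metric (not Ambari's actual metrics provider; the endpoint is illustrative):

import json
import urllib2

def read_jmx_metric(host_port, metric):
  # "Hadoop:service=NameNode,name=FSNamesystem.TotalLoad" splits into the
  # bean query (before the last dot) and the attribute name (after it).
  bean, attribute = metric.rsplit('.', 1)
  url = "http://{0}/jmx?qry={1}".format(host_port, bean)
  data = json.loads(urllib2.urlopen(url).read())
  return data["beans"][0][attribute]

# Illustrative endpoint; a real caller would read dfs.namenode.http-address.
print read_jmx_metric("namenode.example.com:50070",
                      "Hadoop:service=NameNode,name=FSNamesystem.TotalLoad")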

[05/37] ambari git commit: AMBARI-8695: Common Services: Refactor HDP-2.0.6 HDFS, ZOOKEEPER services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/alert_checkpoint_time.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/alert_checkpoint_time.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/alert_checkpoint_time.py
deleted file mode 100644
index 410608f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/alert_checkpoint_time.py
+++ /dev/null
@@ -1,136 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import time
-import urllib2
-import json
-
-LABEL = 'Last Checkpoint: [{h} hours, {m} minutes, {tx} transactions]'
-
-NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
-NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
-NN_HTTP_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'
-NN_CHECKPOINT_TX_KEY = '{{hdfs-site/dfs.namenode.checkpoint.txns}}'
-NN_CHECKPOINT_PERIOD_KEY = '{{hdfs-site/dfs.namenode.checkpoint.period}}'
-
-PERCENT_WARNING = 200
-PERCENT_CRITICAL = 200
-
-CHECKPOINT_TX_DEFAULT = 1000000
-CHECKPOINT_PERIOD_DEFAULT = 21600
-
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (NN_HTTP_ADDRESS_KEY, NN_HTTPS_ADDRESS_KEY, NN_HTTP_POLICY_KEY, 
-      NN_CHECKPOINT_TX_KEY, NN_CHECKPOINT_PERIOD_KEY)      
-  
-
-def execute(parameters=None, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  parameters (dictionary): a mapping of parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-
-  if parameters is None:
-    return (('UNKNOWN', ['There were no parameters supplied to the script.']))
-  
-  uri = None
-  scheme = 'http'  
-  http_uri = None
-  https_uri = None
-  http_policy = 'HTTP_ONLY'
-  percent_warning = PERCENT_WARNING
-  percent_critical = PERCENT_CRITICAL
-  checkpoint_tx = CHECKPOINT_TX_DEFAULT
-  checkpoint_period = CHECKPOINT_PERIOD_DEFAULT
-  
-  if NN_HTTP_ADDRESS_KEY in parameters:
-    http_uri = parameters[NN_HTTP_ADDRESS_KEY]
-
-  if NN_HTTPS_ADDRESS_KEY in parameters:
-    https_uri = parameters[NN_HTTPS_ADDRESS_KEY]
-
-  if NN_HTTP_POLICY_KEY in parameters:
-    http_policy = parameters[NN_HTTP_POLICY_KEY]
-
-  if NN_CHECKPOINT_TX_KEY in parameters:
-    checkpoint_tx = parameters[NN_CHECKPOINT_TX_KEY]
-
-  if NN_CHECKPOINT_PERIOD_KEY in parameters:
-    checkpoint_period = parameters[NN_CHECKPOINT_PERIOD_KEY]
-    
-  # determine the right URI and whether to use SSL
-  uri = http_uri
-  if http_policy == 'HTTPS_ONLY':
-    scheme = 'https'
-    
-    if https_uri is not None:
-      uri = https_uri 
-  
-  current_time = int(round(time.time() * 1000))
-
-  last_checkpoint_time_qry = "{0}://{1}/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem".format(scheme,uri)
-  journal_transaction_info_qry = "{0}://{1}/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo".format(scheme,uri)
-
-  # start out assuming an OK status
-  label = None
-  result_code = "OK"
-
-  try:
-    last_checkpoint_time = int(get_value_from_jmx(last_checkpoint_time_qry,"LastCheckpointTime"))
-    journal_transaction_info = get_value_from_jmx(journal_transaction_info_qry,"JournalTransactionInfo")
-    journal_transaction_info_dict = json.loads(journal_transaction_info)
-  
-    last_tx = int(journal_transaction_info_dict['LastAppliedOrWrittenTxId'])
-    most_recent_tx = int(journal_transaction_info_dict['MostRecentCheckpointTxId'])
-    transaction_difference = last_tx - most_recent_tx
-    
-    delta = (current_time - last_checkpoint_time)/1000
-
-    label = LABEL.format(h=get_time(delta)['h'], m=get_time(delta)['m'], tx=transaction_difference)
-    
-    if (transaction_difference > int(checkpoint_tx)) and (float(delta) / int(checkpoint_period)*100 >= int(percent_critical)):
-      result_code = 'CRITICAL'
-    elif (transaction_difference > int(checkpoint_tx)) and (float(delta) / int(checkpoint_period)*100 >= int(percent_warning)):
-      result_code = 'WARNING'
-
-  except Exception, e:
-    label = str(e)
-    result_code = 'UNKNOWN'
-        
-  return ((result_code, [label]))
-
-def get_time(delta):
-  h = int(delta/3600)
-  m = int((delta % 3600)/60)
-  return {'h':h, 'm':m}
-
-
-def get_value_from_jmx(qry, property):
-  response = urllib2.urlopen(qry)
-  data=response.read()
-  data_dict = json.loads(data)
-  return data_dict["beans"][0][property]
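
The alert above fires only when both conditions hold: the uncheckpointed transaction count exceeds dfs.namenode.checkpoint.txns, and the elapsed time exceeds the configured percentage of dfs.namenode.checkpoint.period. Note that with PERCENT_WARNING and PERCENT_CRITICAL both defaulting to 200, the WARNING branch is unreachable as written, since the CRITICAL test is evaluated first against the same threshold. A worked example using the script's defaults (sample numbers illustrative):

# Defaults from the script above: 1,000,000 txns, 21,600 s period, 200 %.
checkpoint_tx = 1000000
checkpoint_period = 21600
percent_critical = 200

transaction_difference = 1500000  # illustrative uncheckpointed transactions
delta = 50000                     # illustrative seconds since last checkpoint

# 50000 / 21600 * 100 ~= 231 >= 200 and 1500000 > 1000000, so CRITICAL fires.
if (transaction_difference > checkpoint_tx
    and float(delta) / checkpoint_period * 100 >= percent_critical):
  print 'CRITICAL'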

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/alert_ha_namenode_health.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/alert_ha_namenode_health.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/alert_ha_namenode_health.py
deleted file mode 100644
index fc1541d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/alert_ha_namenode_health.py
+++ /dev/null
@@ -1,166 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import urllib2
-import json
-
-RESULT_STATE_OK = 'OK'
-RESULT_STATE_CRITICAL = 'CRITICAL'
-RESULT_STATE_UNKNOWN = 'UNKNOWN'
-RESULT_STATE_SKIPPED = 'SKIPPED'
-
-HDFS_NN_STATE_ACTIVE = 'active'
-HDFS_NN_STATE_STANDBY = 'standby'
-
-HDFS_SITE_KEY = '{{hdfs-site}}'
-NAMESERVICE_KEY = '{{hdfs-site/dfs.nameservices}}'
-NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
-NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
-DFS_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'
-
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (HDFS_SITE_KEY, NAMESERVICE_KEY, NN_HTTP_ADDRESS_KEY,
-  NN_HTTPS_ADDRESS_KEY, DFS_POLICY_KEY)
-  
-
-def execute(parameters=None, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  parameters (dictionary): a mapping of parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-  if parameters is None:
-    return (RESULT_STATE_UNKNOWN, ['There were no parameters supplied to the script.'])
-
-  # if not in HA mode, then SKIP
-  if not NAMESERVICE_KEY in parameters:
-    return (RESULT_STATE_SKIPPED, ['NameNode HA is not enabled'])
-
-  # hdfs-site is required
-  if not HDFS_SITE_KEY in parameters:
-    return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)])
-
-  # determine whether or not SSL is enabled
-  is_ssl_enabled = False
-  if DFS_POLICY_KEY in parameters:
-    dfs_policy = parameters[DFS_POLICY_KEY]
-    if dfs_policy == "HTTPS_ONLY":
-      is_ssl_enabled = True
-
-  name_service = parameters[NAMESERVICE_KEY]
-  hdfs_site = parameters[HDFS_SITE_KEY]
-
-  # look for dfs.ha.namenodes.foo
-  nn_unique_ids_key = 'dfs.ha.namenodes.' + name_service
-  if not nn_unique_ids_key in hdfs_site:
-    return (RESULT_STATE_UNKNOWN, ['Unable to find unique namenode alias key {0}'.format(nn_unique_ids_key)])
-
-  namenode_http_fragment = 'dfs.namenode.http-address.{0}.{1}'
-  jmx_uri_fragment = "http://{0}/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus"
-
-  if is_ssl_enabled:
-    namenode_http_fragment = 'dfs.namenode.https-address.{0}.{1}'
-    jmx_uri_fragment = "https://{0}/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus"
-
-
-  active_namenodes = []
-  standby_namenodes = []
-  unknown_namenodes = []
-
-  # now we have something like 'nn1,nn2,nn3,nn4'
-  # turn it into dfs.namenode.[property].[dfs.nameservices].[nn_unique_id]
-  # ie dfs.namenode.http-address.hacluster.nn1
-  nn_unique_ids = hdfs_site[nn_unique_ids_key].split(',')
-  for nn_unique_id in nn_unique_ids:
-    key = namenode_http_fragment.format(name_service,nn_unique_id)
-
-    if key in hdfs_site:
-      # use str() to ensure that unicode strings do not have the u' in them
-      value = str(hdfs_site[key])
-
-      try:
-        jmx_uri = jmx_uri_fragment.format(value)
-        state = get_value_from_jmx(jmx_uri,'State')
-
-        if state == HDFS_NN_STATE_ACTIVE:
-          active_namenodes.append(value)
-        elif state == HDFS_NN_STATE_STANDBY:
-          standby_namenodes.append(value)
-        else:
-          unknown_namenodes.append(value)
-      except:
-        unknown_namenodes.append(value)
-
-  # now that the request is done, determine if this host is the host that
-  # should report the status of the HA topology
-  is_active_namenode = False
-  for active_namenode in active_namenodes:
-    if active_namenode.startswith(host_name):
-      is_active_namenode = True
-
-  # the topology is healthy in exactly one scenario: 1 active and 1 standby
-  is_topology_healthy = len(active_namenodes) == 1 and len(standby_namenodes) == 1
-
-  result_label = 'Active{0}, Standby{1}, Unknown{2}'.format(str(active_namenodes),
-    str(standby_namenodes), str(unknown_namenodes))
-
-  # Healthy Topology:
-  #   - Active NN reports the alert, standby does not
-  #
-  # Unhealthy Topology:
-  #   - Report the alert if this is the first named host
-  #   - Report the alert if not the first named host, but the other host
-  #   could not report its status
-  if is_topology_healthy:
-    if is_active_namenode is True:
-      return (RESULT_STATE_OK, [result_label])
-    else:
-      return (RESULT_STATE_SKIPPED, ['Another host will report this alert'])
-  else:
-    # dfs.namenode.rpc-address.service.alias is guaranteed in HA mode
-    first_listed_host_key = 'dfs.namenode.rpc-address.{0}.{1}'.format(
-      name_service, nn_unique_ids[0])
-
-    first_listed_host = ''
-    if first_listed_host_key in hdfs_site:
-      first_listed_host = hdfs_site[first_listed_host_key]
-
-    is_first_listed_host = False
-    if first_listed_host.startswith(host_name):
-      is_first_listed_host = True
-
-    if is_first_listed_host:
-      return (RESULT_STATE_CRITICAL, [result_label])
-    else:
-      # not the first listed host, but the first host might be in the unknown
-      return (RESULT_STATE_SKIPPED, ['Another host will report this alert'])
-
-
-def get_value_from_jmx(qry, property):
-  response = urllib2.urlopen(qry)
-  data=response.read()
-  data_dict = json.loads(data)
-  return data_dict["beans"][0][property]
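
To make the key construction in the deleted script concrete: for a hypothetical nameservice "hacluster" with dfs.ha.namenodes.hacluster=nn1,nn2, the script derives one address property per NameNode and asks each for its HA state. An illustrative fragment (hostnames and values are examples only):

hdfs_site = {  # illustrative values only
  'dfs.ha.namenodes.hacluster': 'nn1,nn2',
  'dfs.namenode.http-address.hacluster.nn1': 'c6401.ambari.apache.org:50070',
  'dfs.namenode.http-address.hacluster.nn2': 'c6402.ambari.apache.org:50070',
}

for nn_unique_id in hdfs_site['dfs.ha.namenodes.hacluster'].split(','):
  address = hdfs_site['dfs.namenode.http-address.hacluster.' + nn_unique_id]
  # Each address is then queried at
  # http://<address>/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus
  # and its 'State' attribute sorted into active/standby/unknown.
  print nn_unique_id, address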

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkForFormat.sh
deleted file mode 100644
index 54405f6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkForFormat.sh
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export bin_dir=$1
-shift
-export old_mark_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  sudo rm -f ${mark_file}
-  sudo mkdir -p ${mark_dir}
-fi
-
-if [[ -d $old_mark_dir ]] ; then
-  mv ${old_mark_dir} ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l  | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    sudo su ${hdfs_user} - -s /bin/bash -c "export PATH=$PATH:${bin_dir} ; yes Y | hdfs --config ${conf_dir} ${command}"
-    (( EXIT_CODE = $EXIT_CODE | $? ))
-  else
-    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. Namenode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-
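
The core test above, `ls $dir | wc -l | grep -q ^0$`, succeeds only when a name directory is empty (a missing directory also counts as empty, since `ls` then produces no output); the format is skipped if any configured directory already has content. An equivalent check in Python, for illustration only:

import os

def non_empty_dirs(name_dirs):
  # name_dirs mirrors the script's comma-separated $name_dirs argument;
  # as in the shell version, a missing directory is treated as empty.
  return [d for d in name_dirs.split(',')
          if os.path.isdir(d) and os.listdir(d)]

print non_empty_dirs('/hadoop/hdfs/namenode')  # illustrative path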

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkWebUI.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkWebUI.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkWebUI.py
deleted file mode 100644
index f8e9c1a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkWebUI.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import optparse
-import httplib
-
-#
-# Main.
-#
-def main():
-  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
-  parser.add_option("-m", "--hosts", dest="hosts", help="Comma separated hosts list for WEB UI to check it availability")
-  parser.add_option("-p", "--port", dest="port", help="Port of WEB UI to check it availability")
-
-  (options, args) = parser.parse_args()
-  
-  hosts = options.hosts.split(',')
-  port = options.port
-
-  for host in hosts:
-    try:
-      conn = httplib.HTTPConnection(host, port)
-      # This can be modified to get a partial url part to be sent with request
-      conn.request("GET", "/")
-      httpCode = conn.getresponse().status
-      conn.close()
-    except Exception:
-      httpCode = 404
-
-    if httpCode != 200:
-      print "Cannot access WEB UI on: http://" + host + ":" + port
-      exit(1)
-      
-
-if __name__ == "__main__":
-  main()
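
Usage of the deleted checker is a comma-separated host list plus a single shared port, e.g. (host names and port illustrative):

  python checkWebUI.py -m c6401.ambari.apache.org,c6402.ambari.apache.org -p 50070

Any non-200 response (connection errors are mapped to 404) prints the failing URL and exits with status 1.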

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/__init__.py
deleted file mode 100644
index 35de4bb..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""


[26/37] ambari git commit: AMBARI-8746: Common Services: Refactor HDP 2.0.6 HIVE, OOZIE services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/alerts.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/alerts.json
deleted file mode 100644
index 32d4238..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/alerts.json
+++ /dev/null
@@ -1,42 +0,0 @@
-{
-  "OOZIE": {
-    "service": [],
-    "OOZIE_SERVER": [
-      {
-        "name": "oozie_server_webui",
-        "label": "Oozie Server Web UI",
-        "description": "This host-level alert is triggered if the Oozie server Web UI is unreachable.",
-        "interval": 1,
-        "scope": "ANY",
-        "source": {
-          "type": "WEB",
-          "uri": {
-            "http": "{{oozie-site/oozie.base.url}}/oozie"
-          },
-          "reporting": {
-            "ok": {
-              "text": "HTTP {0} response in {2:.4f} seconds"
-            },
-            "warning":{
-              "text": "HTTP {0} response in {2:.4f} seconds"
-            },
-            "critical": {
-              "text": "Connection failed to {1}"
-            }
-          }
-        }
-      },
-      {
-        "name": "oozie_server_status",
-        "label": "Oozie Server Status",
-        "description": "This host-level alert is triggered if the Oozie server cannot be determined to be up and responding to client requests.",
-        "interval": 1,
-        "scope": "ANY",
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDP/2.0.6/services/OOZIE/package/files/alert_check_oozie_server.py"
-        }
-      }
-    ]
-  }
-}
\ No newline at end of file
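
Judging from the templates themselves, the reporting texts in the WEB alert above are Python format strings where {0} is the HTTP status code, {1} the probed URI, and {2} the response time in seconds. A small illustrative fill (values hypothetical):

ok_text = "HTTP {0} response in {2:.4f} seconds"
# Illustrative fill: status code, probed URI, elapsed seconds.
print ok_text.format(200, "http://oozie.example.com:11000/oozie", 0.0132)
# -> HTTP 200 response in 0.0132 seconds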

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-env.xml
deleted file mode 100644
index 65665bf..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-env.xml
+++ /dev/null
@@ -1,139 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>oozie_user</name>
-    <value>oozie</value>
-    <property-type>USER</property-type>
-    <description>Oozie User.</description>
-  </property>
-  <property>
-    <name>oozie_database</name>
-    <value>New Derby Database</value>
-    <description>Oozie Server Database.</description>
-  </property>
-  <property>
-    <name>oozie_derby_database</name>
-    <value>Derby</value>
-    <description>Oozie Derby Database</description>
-  </property>
-  <property>
-    <name>oozie_data_dir</name>
-    <value>/hadoop/oozie/data</value>
-    <description>Data directory in which the Oozie DB exists</description>
-  </property>
-  <property>
-    <name>oozie_log_dir</name>
-    <value>/var/log/oozie</value>
-    <description>Directory for oozie logs</description>
-  </property>
-  <property>
-    <name>oozie_pid_dir</name>
-    <value>/var/run/oozie</value>
-    <description>Directory in which the pid files for oozie reside.</description>
-  </property>
-  <property>
-    <name>oozie_admin_port</name>
-    <value>11001</value>
-    <description>The admin port Oozie server runs.</description>
-  </property>
-  <property>
-    <name>oozie_heapsize</name>
-    <value>2048</value>
-    <description>Oozie heap size.</description>
-  </property>
-  <property>
-    <name>oozie_permsize</name>
-    <value>256</value>
-    <description>Oozie permanent generation size.</description>
-  </property>
-
-  <!-- oozie-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the jinja template for oozie-env.sh file</description>
-    <value>
-#!/bin/bash
-
-if [ -d "/usr/lib/bigtop-tomcat" ]; then
-  export OOZIE_CONFIG=${OOZIE_CONFIG:-/etc/oozie/conf}
-  export CATALINA_BASE=${CATALINA_BASE:-{{oozie_server_dir}}}
-  export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}
-  export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat
-fi
-
-#Set JAVA HOME
-export JAVA_HOME={{java_home}}
-
-export JRE_HOME=${JAVA_HOME}
-
-# Set Oozie specific environment variables here.
-
-# Settings for the Embedded Tomcat that runs Oozie
-# Java System properties for Oozie should be specified in this variable
-#
-export CATALINA_OPTS="$CATALINA_OPTS -Xmx{{oozie_heapsize}} -XX:MaxPermSize={{oozie_permsize}}"
-
-# Oozie configuration file to load from Oozie configuration directory
-#
-# export OOZIE_CONFIG_FILE=oozie-site.xml
-
-# Oozie logs directory
-#
-export OOZIE_LOG={{oozie_log_dir}}
-
-# Oozie pid directory
-#
-export CATALINA_PID={{pid_file}}
-
-#Location of the data for oozie
-export OOZIE_DATA={{oozie_data_dir}}
-
-# Oozie Log4J configuration file to load from Oozie configuration directory
-#
-# export OOZIE_LOG4J_FILE=oozie-log4j.properties
-
-# Reload interval of the Log4J configuration file, in seconds
-#
-# export OOZIE_LOG4J_RELOAD=10
-
-# The port Oozie server runs
-#
-export OOZIE_HTTP_PORT={{oozie_server_port}}
-
-# The admin port Oozie server runs
-#
-export OOZIE_ADMIN_PORT={{oozie_server_admin_port}}
-
-# The host name Oozie server runs on
-#
-# export OOZIE_HTTP_HOSTNAME=`hostname -f`
-
-# The base URL for callback URLs to Oozie
-#
-# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
-export JAVA_LIBRARY_PATH={{hadoop_lib_home}}/native/Linux-amd64-64
-    </value>
-  </property>
-
-</configuration>

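The `content` value above is a Jinja template; the oozie.py script removed later in this patch renders it with InlineTemplate and writes the result out as oozie-env.sh. A minimal standalone sketch of that rendering step, with hypothetical values for the template variables (the "m" suffixes are added here for illustration; the defaults above are bare numbers):

from jinja2 import Template

# Minimal sketch of rendering the oozie-env.sh template above.
# The values below are hypothetical examples, not Ambari defaults.
env_template = (
    "export JAVA_HOME={{java_home}}\n"
    'export CATALINA_OPTS="$CATALINA_OPTS -Xmx{{oozie_heapsize}} -XX:MaxPermSize={{oozie_permsize}}"\n'
    "export OOZIE_LOG={{oozie_log_dir}}\n"
)

print(Template(env_template).render(
    java_home="/usr/jdk64/jdk1.7.0_67",  # hypothetical path
    oozie_heapsize="2048m",
    oozie_permsize="256m",
    oozie_log_dir="/var/log/oozie",
))
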
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-log4j.xml
deleted file mode 100644
index 7f7158f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-log4j.xml
+++ /dev/null
@@ -1,97 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>content</name>
-    <description>Custom log4j.properties</description>
-    <value>
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License. See accompanying LICENSE file.
-#
-
-# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
-# XLogService sets its value to '${oozie.home}/logs'
-
-log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH
-log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
-log4j.appender.oozie.Append=true
-log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n
-
-log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
-log4j.appender.oozieops.Append=true
-log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
-log4j.appender.oozieinstrumentation.Append=true
-log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
-log4j.appender.oozieaudit.Append=true
-log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
-log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
-log4j.appender.openjpa.Append=true
-log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
-log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.logger.openjpa=INFO, openjpa
-log4j.logger.oozieops=INFO, oozieops
-log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
-log4j.logger.oozieaudit=ALL, oozieaudit
-log4j.logger.org.apache.oozie=INFO, oozie
-log4j.logger.org.apache.hadoop=WARN, oozie
-log4j.logger.org.mortbay=WARN, oozie
-log4j.logger.org.hsqldb=WARN, oozie
-log4j.logger.org.apache.hadoop.security.authentication.server=INFO, oozie
-    </value>
-  </property>
-
-</configuration>

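Everything in the `content` block above is plain log4j properties text that Ambari writes out verbatim as oozie-log4j.properties. A rough sketch of inspecting such a block, e.g. to list which appenders each logger is wired to (the parser below is illustrative, not part of Ambari):

def parse_properties(text):
    """Parse 'key=value' lines, skipping comments and blanks."""
    props = {}
    for line in text.splitlines():
        line = line.strip()
        if line and not line.startswith('#') and '=' in line:
            key, _, value = line.partition('=')
            props[key.strip()] = value.strip()
    return props

sample = """
log4j.logger.org.apache.oozie=INFO, oozie
log4j.logger.org.apache.hadoop=WARN, oozie
"""
for key, value in sorted(parse_properties(sample).items()):
    print('{0} -> {1}'.format(key[len('log4j.logger.'):], value))
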
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-site.xml
deleted file mode 100644
index e72a8be..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-site.xml
+++ /dev/null
@@ -1,312 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-        
-       http://www.apache.org/licenses/LICENSE-2.0
-  
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<configuration supports_final="true">
-
-  <!--
-      Refer to the oozie-default.xml file for the complete list of
-      Oozie configuration properties and their default values.
-  -->
-  <property>
-    <name>oozie.base.url</name>
-    <value>http://localhost:11000/oozie</value>
-    <description>Base Oozie URL.</description>
-  </property>
-
-  <property>
-    <name>oozie.system.id</name>
-    <value>oozie-${user.name}</value>
-    <description>
-      The Oozie system ID.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.systemmode</name>
-    <value>NORMAL</value>
-    <description>
-      System mode for Oozie at startup.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.AuthorizationService.security.enabled</name>
-    <value>true</value>
-    <description>
-      Specifies whether security (user name/admin role) is enabled or not.
-      If disabled any user can manage Oozie system and manage any job.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.PurgeService.older.than</name>
-    <value>30</value>
-    <description>
-      Jobs older than this value, in days, will be purged by the PurgeService.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.PurgeService.purge.interval</name>
-    <value>3600</value>
-    <description>
-      Interval at which the purge service will run, in seconds.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.CallableQueueService.queue.size</name>
-    <value>1000</value>
-    <description>Max callable queue size</description>
-  </property>
-
-  <property>
-    <name>oozie.service.CallableQueueService.threads</name>
-    <value>10</value>
-    <description>Number of threads used for executing callables</description>
-  </property>
-
-  <property>
-    <name>oozie.service.CallableQueueService.callable.concurrency</name>
-    <value>3</value>
-    <description>
-      Maximum concurrency for a given callable type.
-      Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc).
-      Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
-      All commands that use action executors (action-start, action-end, action-kill and action-check) use
-      the action type as the callable type.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.coord.normal.default.timeout</name>
-    <value>120</value>
-    <description>Default timeout for a coordinator action input check (in minutes) for normal job.
-      -1 means infinite timeout
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.db.schema.name</name>
-    <value>oozie</value>
-    <description>
-      Oozie Database Name
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.authentication.type</name>
-    <value>simple</value>
-    <description>
-      Authentication used for Oozie HTTP endpoint, the supported values are: simple | kerberos |
-      #AUTHENTICATION_HANDLER_CLASSNAME#.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.WorkflowAppService.system.libpath</name>
-    <value>/user/${user.name}/share/lib</value>
-    <description>
-      System library path to use for workflow applications.
-      This path is added to workflow application if their job properties sets
-      the property 'oozie.use.system.libpath' to true.
-    </description>
-  </property>
-
-  <property>
-    <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-    <value>false</value>
-    <description>
-      If set to true, submissions of MapReduce and Pig jobs will automatically
-      include the system library path, so users do not need to specify where
-      the Pig JAR files are; the ones from the system library path are used
-      instead.
-    </description>
-  </property>
-  <property>
-    <name>oozie.authentication.kerberos.name.rules</name>
-    <value>
-      RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/
-      RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/
-      RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-      RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-      DEFAULT
-    </value>
-    <description>The mapping from kerberos principal names to local OS user names.</description>
-  </property>
-  <property>
-    <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-    <value>*=/etc/hadoop/conf</value>
-    <description>
-      Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
-      the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
-      used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
-      the relevant Hadoop *-site.xml files. If the path is relative, it is resolved
-      within the Oozie configuration directory; the path can also be absolute (i.e.
-      pointing to Hadoop client conf/ directories in the local filesystem).
-    </description>
-  </property>
-  <property>
-    <name>oozie.service.ActionService.executor.ext.classes</name>
-    <value>
-      org.apache.oozie.action.email.EmailActionExecutor,
-      org.apache.oozie.action.hadoop.HiveActionExecutor,
-      org.apache.oozie.action.hadoop.ShellActionExecutor,
-      org.apache.oozie.action.hadoop.SqoopActionExecutor,
-      org.apache.oozie.action.hadoop.DistcpActionExecutor
-    </value>
-    <description>
-      List of ActionExecutors extension classes (separated by commas). Only action types with associated executors can
-      be used in workflows. This property is a convenience property to add extensions to the built in executors without
-      having to include all the built in ones.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.SchemaService.wf.ext.schemas</name>
-    <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd,hive-action-0.3.xsd</value>
-    <description>
-      Schemas for additional action types. IMPORTANT: if there are no schemas, leave a single-space string; the service
-      trims the value, and if it is empty, Configuration assumes it is NULL.
-    </description>
-  </property>
-  <property>
-    <name>oozie.service.JPAService.create.db.schema</name>
-    <value>false</value>
-    <description>
-      Creates Oozie DB.
-
-      If set to true, it creates the DB schema if it does not exist; if the DB schema already exists, this is a no-op.
-      If set to false, it does not create the DB schema; if the DB schema does not exist, startup fails.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.driver</name>
-    <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-    <description>
-      JDBC driver class.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.url</name>
-    <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
-    <description>
-      JDBC URL.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.jdbc.username</name>
-    <value>oozie</value>
-    <description>
-      Database user name to use to connect to the database
-    </description>
-  </property>
-
-  <property require-input = "true">
-    <name>oozie.service.JPAService.jdbc.password</name>
-    <value> </value>
-    <property-type>PASSWORD</property-type>
-    <description>
-      DB user password.
-
-      IMPORTANT: if the password is empty, leave a single-space string; the service trims the value,
-      and if it is empty, Configuration assumes it is NULL.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.service.JPAService.pool.max.active.conn</name>
-    <value>10</value>
-    <description>
-      Max number of connections.
-    </description>
-  </property>
-
-  <property>
-    <name>oozie.services</name>
-    <value>
-      org.apache.oozie.service.SchedulerService,
-      org.apache.oozie.service.InstrumentationService,
-      org.apache.oozie.service.CallableQueueService,
-      org.apache.oozie.service.UUIDService,
-      org.apache.oozie.service.ELService,
-      org.apache.oozie.service.AuthorizationService,
-      org.apache.oozie.service.UserGroupInformationService,
-      org.apache.oozie.service.HadoopAccessorService,
-      org.apache.oozie.service.URIHandlerService,
-      org.apache.oozie.service.MemoryLocksService,
-      org.apache.oozie.service.DagXLogInfoService,
-      org.apache.oozie.service.SchemaService,
-      org.apache.oozie.service.LiteWorkflowAppService,
-      org.apache.oozie.service.JPAService,
-      org.apache.oozie.service.StoreService,
-      org.apache.oozie.service.CoordinatorStoreService,
-      org.apache.oozie.service.SLAStoreService,
-      org.apache.oozie.service.DBLiteWorkflowStoreService,
-      org.apache.oozie.service.CallbackService,
-      org.apache.oozie.service.ActionService,
-      org.apache.oozie.service.ActionCheckerService,
-      org.apache.oozie.service.RecoveryService,
-      org.apache.oozie.service.PurgeService,
-      org.apache.oozie.service.CoordinatorEngineService,
-      org.apache.oozie.service.BundleEngineService,
-      org.apache.oozie.service.DagEngineService,
-      org.apache.oozie.service.CoordMaterializeTriggerService,
-      org.apache.oozie.service.StatusTransitService,
-      org.apache.oozie.service.PauseTransitService,
-      org.apache.oozie.service.GroupsService,
-      org.apache.oozie.service.ProxyUserService
-    </value>
-    <description>List of Oozie services</description>
-  </property>
-  <property>
-    <name>oozie.service.URIHandlerService.uri.handlers</name>
-    <value>org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler</value>
-    <description>
-      Enlist the different uri handlers supported for data availability checks.
-    </description>
-  </property>
-  <property>
-    <name>oozie.services.ext</name>
-    <value>org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService
-    </value>
-    <description>
-      To add/replace services defined in 'oozie.services' with custom implementations.
-      Class names must be separated by commas.
-    </description>
-  </property>
-  <property>
-    <name>oozie.service.coord.push.check.requeue.interval</name>
-    <value>30000</value>
-    <description>
-      Command re-queue interval for push dependencies (in millisecond).
-    </description>
-  </property>
-  <property>
-    <name>oozie.credentials.credentialclasses</name>
-    <value>hcat=org.apache.oozie.action.hadoop.HCatCredentials</value>
-    <description>
-      Credential Class to be used for HCat.
-    </description>
-  </property>
-
-</configuration>

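A quick way to sanity-check values in a Hadoop-style *-site.xml like the one above is the name/value lookup that the smoke-test script further down performs with xmllint; the same lookup in Python, as a sketch:

import xml.etree.ElementTree as ET

def get_site_property(xml_path, name):
    """Return the <value> of the <property> whose <name> matches, or None."""
    root = ET.parse(xml_path).getroot()
    for prop in root.iter('property'):
        if prop.findtext('name') == name:
            return prop.findtext('value')
    return None

# Hypothetical usage:
# print(get_site_property('/etc/oozie/conf/oozie-site.xml', 'oozie.base.url'))
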
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/metainfo.xml
index ec66213..dad6dc1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/metainfo.xml
@@ -20,146 +20,7 @@
   <services>
     <service>
       <name>OOZIE</name>
-      <displayName>Oozie</displayName>
-      <comment>System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/legal/open-source-faq/"&gt;ExtJS&lt;/a&gt; Library.
-      </comment>
-      <version>4.0.0.2.0</version>
-      <components>
-        <component>
-          <name>OOZIE_SERVER</name>
-          <displayName>Oozie Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>YARN/YARN_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/oozie_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>OOZIE_CLIENT</name>
-          <displayName>Oozie Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/oozie_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>oozie-site.xml</fileName>
-              <dictionaryName>oozie-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>oozie-env.sh</fileName>
-              <dictionaryName>oozie-env</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>oozie-log4j.properties</fileName>
-              <dictionaryName>oozie-log4j</dictionaryName>
-            </configFile>
-          </configFiles>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>oozie</name>
-            </package>
-            <package>
-              <name>oozie-client</name>
-            </package>
-            <package>
-              <name>mysql-connector-java</name>
-            </package>
-          </packages>
-        </osSpecific>
-        
-       <osSpecific>
-          <osFamily>redhat5,redhat6,suse11</osFamily>
-          <packages>
-            <package>
-              <name>extjs-2.2-1</name>
-            </package>
-          </packages>
-        </osSpecific>
-        
-        <osSpecific>
-          <osFamily>ubuntu12</osFamily>
-          <packages>
-            <package>
-              <name>extjs</name>
-            </package>
-            <package>
-              <name>libxml2-utils</name>
-            </package>
-          </packages>
-        </osSpecific>
-        
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>YARN</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>oozie-site</config-type>
-        <config-type>oozie-env</config-type>
-        <config-type>oozie-log4j</config-type>
-        <config-type>yarn-site</config-type>
-      </configuration-dependencies>
+      <extends>common-services/OOZIE/4.0.0.2.0</extends>
     </service>
   </services>
 </metainfo>

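This is the heart of the refactoring: the stack's metainfo.xml shrinks to a single <extends> pointing at the common-services definition, and the full service description (components, packages, config dependencies) is inherited from there. A toy illustration of the semantics, where child entries simply override parent entries (this is NOT Ambari's actual merge logic):

# Toy illustration of metainfo inheritance: the stack-level definition
# overrides only what it declares; everything else comes from the parent.
# This is NOT Ambari's actual resolver.
def resolve_service(parent, child):
    merged = dict(parent)
    merged.update(child)
    return merged

common_services = {'name': 'OOZIE', 'version': '4.0.0.2.0',
                   'components': ['OOZIE_SERVER', 'OOZIE_CLIENT']}
hdp_206 = {'name': 'OOZIE'}  # the stack now declares only the name

print(resolve_service(common_services, hdp_206))
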
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/alert_check_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/alert_check_oozie_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/alert_check_oozie_server.py
deleted file mode 100644
index 7bf1255..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/alert_check_oozie_server.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import subprocess
-from subprocess import CalledProcessError
-
-RESULT_CODE_OK = 'OK'
-RESULT_CODE_CRITICAL = 'CRITICAL'
-RESULT_CODE_UNKNOWN = 'UNKNOWN'
-
-OOZIE_URL_KEY = '{{oozie-site/oozie.base.url}}'
-
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (OOZIE_URL_KEY,)
-  
-
-def execute(parameters=None, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  parameters (dictionary): a mapping of parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-
-  if parameters is None:
-    return (RESULT_CODE_UNKNOWN, ['There were no parameters supplied to the script.'])
-
-  oozie_url = None
-  if OOZIE_URL_KEY in parameters:
-    oozie_url = parameters[OOZIE_URL_KEY]
-
-  if oozie_url is None:
-    return (RESULT_CODE_UNKNOWN, ['The Oozie URL is a required parameter.'])
-
-  try:
-    # oozie admin -oozie http://server:11000/oozie -status
-    oozie_process = subprocess.Popen(['oozie', 'admin', '-oozie',
-      oozie_url, '-status'], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
-
-    oozie_output, oozie_error = oozie_process.communicate()
-    oozie_return_code = oozie_process.returncode
-
-    if oozie_return_code == 0:
-      # strip trailing newlines
-      oozie_output = str(oozie_output).strip('\n')
-      return (RESULT_CODE_OK, [oozie_output])
-    else:
-      oozie_error = str(oozie_error).strip('\n')
-      return (RESULT_CODE_CRITICAL, [oozie_error])
-
-  except CalledProcessError, cpe:
-    return (RESULT_CODE_CRITICAL, [str(cpe)])

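The alert framework imports this module, resolves the {{oozie-site/oozie.base.url}} token returned by get_tokens() against live configs, and calls execute() with the resulting dictionary. Driving it by hand for local testing might look like this (a sketch; the URL and hostname are hypothetical):

# Sketch: hand-driving the alert the way the framework would.
# Assumes the file above is importable as alert_check_oozie_server.
import alert_check_oozie_server as alert

parameters = {
    '{{oozie-site/oozie.base.url}}': 'http://localhost:11000/oozie',  # hypothetical
}
code, labels = alert.execute(parameters=parameters, host_name='host1.example.com')
print(code + ': ' + labels[0])
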
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/oozieSmoke2.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/oozieSmoke2.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/oozieSmoke2.sh
deleted file mode 100644
index 30d878c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/oozieSmoke2.sh
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-os_family=$1
-shift
-
-function getValueFromField {
-  xmllint $1 | grep "<name>$2</name>" -C 2 | grep '<value>' | cut -d ">" -f2 | cut -d "<" -f1
-  return $?
-}
-
-function checkOozieJobStatus {
-  local job_id=$1
-  local num_of_tries=$2
-  #default num_of_tries to 10 if not present
-  num_of_tries=${num_of_tries:-10}
-  local i=0
-  local rc=1
-  local cmd="source ${oozie_conf_dir}/oozie-env.sh ; ${oozie_bin_dir}/oozie job -oozie ${OOZIE_SERVER} -info $job_id"
-  sudo su ${smoke_test_user} -s /bin/bash - -c "$cmd"
-  while [ $i -lt $num_of_tries ] ; do
-    cmd_output=`sudo su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
-    (IFS='';echo $cmd_output)
-    act_status=$(IFS='';echo $cmd_output | grep ^Status | cut -d':' -f2 | sed 's| ||g')
-    echo "workflow_status=$act_status"
-    if [ "RUNNING" == "$act_status" ]; then
-      #increment the counter and get the status again after waiting for 15 secs
-      sleep 15
-      (( i++ ))
-    elif [ "SUCCEEDED" == "$act_status" ]; then
-      rc=0;
-      break;
-    else
-      rc=1
-      break;
-    fi
-  done
-  return $rc
-}
-
-export oozie_conf_dir=$1
-export oozie_bin_dir=$2
-export hadoop_conf_dir=$3
-export hadoop_bin_dir=$4
-export smoke_test_user=$5
-export security_enabled=$6
-export smoke_user_keytab=$7
-export kinit_path_local=$8
-
-export OOZIE_EXIT_CODE=0
-export JOBTRACKER=`getValueFromField ${hadoop_conf_dir}/yarn-site.xml yarn.resourcemanager.address`
-export NAMENODE=`getValueFromField ${hadoop_conf_dir}/core-site.xml fs.defaultFS`
-export OOZIE_SERVER=`getValueFromField ${oozie_conf_dir}/oozie-site.xml oozie.base.url | tr '[:upper:]' '[:lower:]'`
-
-if [ "$os_family" == "ubuntu" ] ; then
-  LIST_PACKAGE_FILES_CMD='dpkg-query -L'
-else
-  LIST_PACKAGE_FILES_CMD='rpm -ql'
-fi
-  
-
-export OOZIE_EXAMPLES_DIR=`$LIST_PACKAGE_FILES_CMD oozie-client | grep 'oozie-examples.tar.gz$' | xargs dirname`
-if [[ -z "$OOZIE_EXAMPLES_DIR" ]] ; then
-  export OOZIE_EXAMPLES_DIR='/usr/hdp/current/oozie-client/doc/'
-fi
-cd $OOZIE_EXAMPLES_DIR
-
-sudo tar -zxf oozie-examples.tar.gz
-sudo chmod -R o+rx examples
-
-sudo sed -i "s|nameNode=hdfs://localhost:8020|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
-sudo sed -i "s|nameNode=hdfs://localhost:9000|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
-sudo sed -i "s|jobTracker=localhost:8021|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
-sudo sed -i "s|jobTracker=localhost:9001|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
-sudo sed -i "s|jobTracker=localhost:8032|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
-sudo sed -i "s|oozie.wf.application.path=hdfs://localhost:9000|oozie.wf.application.path=$NAMENODE|g" examples/apps/map-reduce/job.properties
-
-if [[ $security_enabled == "True" ]]; then
-  kinitcmd="${kinit_path_local} -kt ${smoke_user_keytab} ${smoke_test_user}; "
-else 
-  kinitcmd=""
-fi
-
-sudo su ${smoke_test_user} -s /bin/bash - -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -rm -r examples"
-sudo su ${smoke_test_user} -s /bin/bash - -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -rm -r input-data"
-sudo su ${smoke_test_user} -s /bin/bash - -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples examples"
-sudo su ${smoke_test_user} -s /bin/bash - -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples/input-data input-data"
-
-cmd="${kinitcmd}source ${oozie_conf_dir}/oozie-env.sh ; ${oozie_bin_dir}/oozie -Doozie.auth.token.cache=false job -oozie $OOZIE_SERVER -config $OOZIE_EXAMPLES_DIR/examples/apps/map-reduce/job.properties  -run"
-echo $cmd
-job_info=`sudo su ${smoke_test_user} -s /bin/bash - -c "$cmd" | grep "job:"`
-job_id="`echo $job_info | cut -d':' -f2`"
-checkOozieJobStatus "$job_id" 15
-OOZIE_EXIT_CODE="$?"
-exit $OOZIE_EXIT_CODE

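checkOozieJobStatus above is a poll-until-terminal loop: re-query the job every 15 seconds and stop on SUCCEEDED, any failure state, or exhausted retries. The same logic in Python, as a sketch:

import subprocess
import time

def check_oozie_job_status(oozie_url, job_id, num_of_tries=10, interval=15):
    """Poll 'oozie job -info' until the job leaves RUNNING; return 0 on success."""
    for _ in range(num_of_tries):
        output = subprocess.check_output(
            ['oozie', 'job', '-oozie', oozie_url, '-info', job_id])
        status = next((line.split(':', 1)[1].strip()
                       for line in output.decode().splitlines()
                       if line.startswith('Status')), None)
        if status == 'SUCCEEDED':
            return 0
        if status != 'RUNNING':
            return 1
        time.sleep(interval)
    return 1
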
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/wrap_ooziedb.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/wrap_ooziedb.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/wrap_ooziedb.sh
deleted file mode 100644
index 36576b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/wrap_ooziedb.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-OUT=`cd /var/tmp/oozie && /usr/lib/oozie/bin/ooziedb.sh "$@" 2>&1`
-EC=$?
-echo $OUT
-GRVAR=`echo ${OUT} | grep -o "java.lang.Exception: DB schema exists"`
-if [ ${EC} -ne 0 ] && [ -n "$GRVAR" ]
-then
-  exit 0
-else
-  exit $EC
-fi  

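The wrapper above exists for idempotency: ooziedb.sh create exits non-zero when the schema is already there, and the wrapper converts exactly that failure back into success. The same rule in Python, as a sketch:

import subprocess

def run_ooziedb(args):
    """Run ooziedb.sh, treating 'DB schema exists' as success (already done)."""
    proc = subprocess.Popen(
        ['/usr/lib/oozie/bin/ooziedb.sh'] + list(args),
        cwd='/var/tmp/oozie',
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = proc.communicate()
    if proc.returncode != 0 and b'DB schema exists' in out:
        return 0  # schema already created on a previous run
    return proc.returncode
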
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie.py
deleted file mode 100644
index a760ab2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie.py
+++ /dev/null
@@ -1,206 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-
-from resource_management import *
-
-def oozie(is_server=False):  # TODO: see if we can remove this parameter
-  import params
-
-  if is_server:
-    params.HdfsDirectory(params.oozie_hdfs_user_dir,
-                         action="create",
-                         owner=params.oozie_user,
-                         mode=params.oozie_hdfs_user_mode
-    )
-  Directory(params.conf_dir,
-             recursive = True,
-             owner = params.oozie_user,
-             group = params.user_group
-  )
-  XmlConfig("oozie-site.xml",
-    conf_dir = params.conf_dir,
-    configurations = params.config['configurations']['oozie-site'],
-    configuration_attributes=params.config['configuration_attributes']['oozie-site'],
-    owner = params.oozie_user,
-    group = params.user_group,
-    mode = 0664
-  )
-  File(format("{conf_dir}/oozie-env.sh"),
-    owner=params.oozie_user,
-    content=InlineTemplate(params.oozie_env_sh_template)
-  )
-
-  if (params.log4j_props != None):
-    File(format("{params.conf_dir}/oozie-log4j.properties"),
-      mode=0644,
-      group=params.user_group,
-      owner=params.oozie_user,
-      content=params.log4j_props
-    )
-  elif (os.path.exists(format("{params.conf_dir}/oozie-log4j.properties"))):
-    File(format("{params.conf_dir}/oozie-log4j.properties"),
-      mode=0644,
-      group=params.user_group,
-      owner=params.oozie_user
-    )
-
-  if params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0:
-    File(format("{params.conf_dir}/adminusers.txt"),
-      mode=0644,
-      group=params.user_group,
-      owner=params.oozie_user,
-      content=Template('adminusers.txt.j2', oozie_user=params.oozie_user)
-    )
-  else:
-    File ( format("{params.conf_dir}/adminusers.txt"),
-           owner = params.oozie_user,
-           group = params.user_group
-    )
-
-  environment = {
-    "no_proxy": format("{ambari_server_hostname}")
-  }
-
-  if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
-     params.jdbc_driver_name == "org.postgresql.Driver" or \
-     params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
-    Execute(format("/bin/sh -c 'cd /usr/lib/ambari-agent/ &&\
-    curl -kf -x \"\" \
-    --retry 5 {jdk_location}{check_db_connection_jar_name}\
-     -o {check_db_connection_jar_name}'"),
-      not_if  = format("[ -f {check_db_connection_jar} ]"),
-      environment=environment
-    )
-  pass
-
-  oozie_ownership()
-  
-  if is_server:      
-    oozie_server_specific()
-  
-def oozie_ownership():
-  import params
-  
-  File ( format("{conf_dir}/hadoop-config.xml"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-
-  File ( format("{conf_dir}/oozie-default.xml"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-
-  Directory ( format("{conf_dir}/action-conf"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-
-  File ( format("{conf_dir}/action-conf/hive.xml"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-  
-def oozie_server_specific():
-  import params
-  
-  File(params.pid_file,
-    action="delete",
-    not_if=format("ls {pid_file} >/dev/null 2>&1 && !(ps `cat {pid_file}` >/dev/null 2>&1)")
-  )
-  
-  oozie_server_directories = [format("{oozie_home}/{oozie_tmp_dir}"), params.oozie_pid_dir, params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir, params.oozie_lib_dir, params.oozie_webapps_dir, params.oozie_webapps_conf_dir, params.oozie_server_dir]
-  Directory( oozie_server_directories,
-    owner = params.oozie_user,
-    group = params.user_group,
-    mode = 0755,
-    recursive = True,
-    recursive_permission=True
-  )
-  
-  Directory(params.oozie_libext_dir,
-            recursive=True,
-  )
-  
-  configure_cmds = []  
-  configure_cmds.append(('tar','-xvf',format('{oozie_home}/oozie-sharelib.tar.gz'),'-C',params.oozie_home))
-  configure_cmds.append(('cp', params.ext_js_path, params.oozie_libext_dir))
-  configure_cmds.append(('chown', format('{oozie_user}:{user_group}'), format('{oozie_libext_dir}/{ext_js_file}')))
-  configure_cmds.append(('chown', '-RL', format('{oozie_user}:{user_group}'), params.oozie_webapps_conf_dir))
-  
-  no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
-  Execute( configure_cmds,
-    not_if  = no_op_test,
-    sudo = True,
-  )
-
-  if params.jdbc_driver_name=="com.mysql.jdbc.Driver" or params.jdbc_driver_name=="oracle.jdbc.driver.OracleDriver":
-    Execute(('cp', params.jdbc_driver_jar, params.oozie_libext_dir),
-      not_if  = no_op_test,
-      sudo = True,
-    )
-  #falcon el extension
-  if params.has_falcon_host:
-    Execute(format('sudo cp {falcon_home}/oozie/ext/falcon-oozie-el-extension-*.jar {oozie_libext_dir}'),
-      not_if  = no_op_test,
-    )
-    Execute(format('sudo chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'),
-      not_if  = no_op_test,
-    )
-  if params.lzo_enabled:
-    Package(params.lzo_packages_for_current_host)
-    Execute(format('sudo cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),
-      not_if  = no_op_test,
-    )
-
-  Execute(format("cd {oozie_tmp_dir} && {oozie_setup_sh} prepare-war"),
-    user = params.oozie_user,
-    not_if  = no_op_test
-  )
-
-  if params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0:
-    # Create hive-site and tez-site configs for oozie
-    Directory(params.hive_conf_dir,
-        recursive = True,
-        owner = params.oozie_user,
-        group = params.user_group
-    )
-    if 'hive-site' in params.config['configurations']:
-      XmlConfig("hive-site.xml",
-        conf_dir=params.hive_conf_dir,
-        configurations=params.config['configurations']['hive-site'],
-        configuration_attributes=params.config['configuration_attributes']['hive-site'],
-        owner=params.oozie_user,
-        group=params.user_group,
-        mode=0644
-    )
-    if 'tez-site' in params.config['configurations']:
-      XmlConfig( "tez-site.xml",
-        conf_dir = params.hive_conf_dir,
-        configurations = params.config['configurations']['tez-site'],
-        configuration_attributes=params.config['configuration_attributes']['tez-site'],
-        owner = params.oozie_user,
-        group = params.user_group,
-        mode = 0664
-    )
-  pass
-  

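Most of the guards in oozie_server_specific() hang off the same no_op_test predicate: skip the action when the pid file names a live process. That check, extracted into standalone Python as a sketch:

import os

def process_alive(pid_file):
    """True when pid_file exists and its pid maps to a running process."""
    try:
        with open(pid_file) as f:
            pid = int(f.read().strip())
        os.kill(pid, 0)  # signal 0 probes existence without sending anything
        return True
    except (IOError, OSError, ValueError):
        return False
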
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_client.py
deleted file mode 100644
index f77a8db..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_client.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from oozie import oozie
-from oozie_service import oozie_service
-
-         
-class OozieClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    oozie(is_server=False)
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-    
-if __name__ == "__main__":
-  OozieClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_server.py
deleted file mode 100644
index f07e36d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_server.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from oozie import oozie
-from oozie_service import oozie_service
-
-         
-class OozieServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    oozie(is_server=True)
-    
-  def start(self, env, rolling_restart=False):
-    import params
-    env.set_params(params)
-    #TODO remove this when config command will be implemented
-    self.configure(env)
-    oozie_service(action='start')
-    
-  def stop(self, env, rolling_restart=False):
-    import params
-    env.set_params(params)
-    oozie_service(action='stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.pid_file)
-    
-if __name__ == "__main__":
-  OozieServer().execute()

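Every Ambari component script follows this shape: a Script subclass whose method names match the commands the agent can send (install, configure, start, stop, status). A stripped-down sketch of that dispatch contract (not resource_management's real implementation, which also parses the command JSON, sets up logging, structured output, etc.):

# Stripped-down sketch of the Script dispatch contract.
class MiniScript(object):
    def execute(self, command, env=None):
        getattr(self, command)(env)

class DemoServer(MiniScript):
    def start(self, env):
        print('starting')
    def stop(self, env):
        print('stopping')

DemoServer().execute('start')
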
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_service.py
deleted file mode 100644
index a1d7bad..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_service.py
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-from resource_management import *
-
-def oozie_service(action = 'start'): # 'start' or 'stop'
-  import params
-
-  kinit_if_needed = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal};") if params.security_enabled else ""
-  no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
-  
-  if action == 'start':
-    start_cmd = format("cd {oozie_tmp_dir} && {oozie_home}/bin/oozie-start.sh")
-    
-    if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
-       params.jdbc_driver_name == "org.postgresql.Driver" or \
-       params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
-      db_connection_check_command = format("{java_home}/bin/java -cp {check_db_connection_jar}:{jdbc_driver_jar} org.apache.ambari.server.DBConnectionVerification '{oozie_jdbc_connection_url}' {oozie_metastore_user_name} {oozie_metastore_user_passwd!p} {jdbc_driver_name}")
-    else:
-      db_connection_check_command = None
-      
-    cmd1 =  format("cd {oozie_tmp_dir} && {oozie_home}/bin/ooziedb.sh create -sqlfile oozie.sql -run")
-    cmd2 =  format("{kinit_if_needed} {put_shared_lib_to_hdfs_cmd} ; hadoop --config {hadoop_conf_dir} dfs -chmod -R 755 {oozie_hdfs_user_dir}/share")
-
-    if not os.path.isfile(params.jdbc_driver_jar) and params.jdbc_driver_name == "org.postgresql.Driver":
-      print format("ERROR: jdbc file {jdbc_driver_jar} is unavailable. Please follow these steps:\n" \
-        "1) Download postgresql-9.0-801.jdbc4.jar.\n2) Create the needed directory: mkdir -p {oozie_home}/libserver/\n" \
-        "3) Copy postgresql-9.0-801.jdbc4.jar to the newly created dir: cp /path/to/jdbc/postgresql-9.0-801.jdbc4.jar " \
-        "{oozie_home}/libserver/\n4) Copy postgresql-9.0-801.jdbc4.jar to libext: cp " \
-        "/path/to/jdbc/postgresql-9.0-801.jdbc4.jar {oozie_home}/libext/\n")
-      exit(1)
-
-    if db_connection_check_command:
-      Execute( db_connection_check_command, tries=5, try_sleep=10)
-                  
-    Execute( cmd1,
-      user = params.oozie_user,
-      not_if  = no_op_test,
-      ignore_failures = True
-    ) 
-    
-    Execute( cmd2,
-      user = params.oozie_user,
-      not_if = format("{kinit_if_needed} hadoop --config {hadoop_conf_dir} dfs -ls /user/oozie/share | awk 'BEGIN {{count=0;}} /share/ {{count++}} END {{if (count > 0) {{exit 0}} else {{exit 1}}}}'"),
-      path = params.execute_path
-    )
-    
-    Execute( start_cmd,
-      user = params.oozie_user,
-      not_if  = no_op_test,
-    )
-  elif action == 'stop':
-    stop_cmd  = format("cd {oozie_tmp_dir} && {oozie_home}/bin/oozie-stop.sh")
-    Execute(stop_cmd,
-      only_if  = no_op_test,
-      user = params.oozie_user
-    )
-    File(params.pid_file,
-         action = "delete",
-    )
-
-  
-  

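The not_if guard on cmd2 above decides whether the share lib upload is needed by listing the HDFS share directory and counting matches with awk. The same predicate in Python, as a sketch:

import subprocess

def sharelib_exists(hadoop_conf_dir='/etc/hadoop/conf',
                    share_dir='/user/oozie/share'):
    """True when 'hadoop dfs -ls' shows the Oozie share lib already in HDFS."""
    try:
        out = subprocess.check_output(
            ['hadoop', '--config', hadoop_conf_dir, 'dfs', '-ls', share_dir])
    except (subprocess.CalledProcessError, OSError):
        return False
    return b'share' in out
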
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/params.py
deleted file mode 100644
index b5f2ea7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/params.py
+++ /dev/null
@@ -1,170 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
-from resource_management import *
-import status_params
-import itertools
-import os
-
-# server configurations
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
-
-#hadoop params
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
-  hadoop_bin_dir = "/usr/hdp/current/hadoop-client/bin"
-  hadoop_lib_home = "/usr/hdp/current/hadoop-client/lib"
-  oozie_lib_dir = "/usr/hdp/current/oozie-client/"
-  oozie_setup_sh = "/usr/hdp/current/oozie-client/bin/oozie-setup.sh"
-  oozie_webapps_dir = "/usr/hdp/current/oozie-client/oozie-server/webapps"
-  oozie_webapps_conf_dir = "/usr/hdp/current/oozie-client/oozie-server/conf"
-  oozie_libext_dir = "/usr/hdp/current/oozie-client/libext"
-  oozie_server_dir = "/usr/hdp/current/oozie-client/oozie-server"
-  oozie_shared_lib = "/usr/hdp/current/oozie-client/share"
-  oozie_home = "/usr/hdp/current/oozie-client"
-  oozie_bin_dir = "/usr/hdp/current/oozie-client/bin"
-  falcon_home = '/usr/hdp/current/falcon-client'
-else:
-  hadoop_bin_dir = "/usr/bin"
-  hadoop_lib_home = "/usr/lib/hadoop/lib"
-  oozie_lib_dir = "/var/lib/oozie/"
-  oozie_setup_sh = "/usr/lib/oozie/bin/oozie-setup.sh"
-  oozie_webapps_dir = "/var/lib/oozie/oozie-server/webapps/"
-  oozie_webapps_conf_dir = "/var/lib/oozie/oozie-server/conf"
-  oozie_libext_dir = "/usr/lib/oozie/libext"
-  oozie_server_dir = "/var/lib/oozie/oozie-server"
-  oozie_shared_lib = "/usr/lib/oozie/share"
-  oozie_home = "/usr/lib/oozie"
-  oozie_bin_dir = "/usr/bin"
-  falcon_home = '/usr/lib/falcon'
-
-execute_path = oozie_bin_dir + os.pathsep + hadoop_bin_dir
-
-hadoop_conf_dir = "/etc/hadoop/conf"
-conf_dir = "/etc/oozie/conf"
-hive_conf_dir = "/etc/oozie/conf/action-conf/hive"
-oozie_user = config['configurations']['oozie-env']['oozie_user']
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-user_group = config['configurations']['cluster-env']['user_group']
-jdk_location = config['hostLevelParams']['jdk_location']
-check_db_connection_jar_name = "DBConnectionVerification.jar"
-check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
-oozie_tmp_dir = "/var/tmp/oozie"
-oozie_hdfs_user_dir = format("/user/{oozie_user}")
-oozie_pid_dir = status_params.oozie_pid_dir
-pid_file = status_params.pid_file
-hadoop_jar_location = "/usr/lib/hadoop/"
-# for HDP1 it's "/usr/share/HDP-oozie/ext.zip"
-ext_js_file = "ext-2.2.zip"
-ext_js_path = format("/usr/share/HDP-oozie/{ext_js_file}")
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-oozie_heapsize = config['configurations']['oozie-env']['oozie_heapsize']
-oozie_permsize = config['configurations']['oozie-env']['oozie_permsize']
-
-kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file']
-oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
-smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-oozie_keytab = config['configurations']['oozie-env']['oozie_keytab']
-oozie_env_sh_template = config['configurations']['oozie-env']['content']
-
-oracle_driver_jar_name = "ojdbc6.jar"
-
-java_home = config['hostLevelParams']['java_home']
-oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username']
-oozie_metastore_user_passwd = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.password","")
-oozie_jdbc_connection_url = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.url", "")
-oozie_log_dir = config['configurations']['oozie-env']['oozie_log_dir']
-oozie_data_dir = config['configurations']['oozie-env']['oozie_data_dir']
-oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
-oozie_server_admin_port = config['configurations']['oozie-env']['oozie_admin_port']
-fs_root = config['configurations']['core-site']['fs.defaultFS']
-
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.0') >= 0 and compare_versions(hdp_stack_version, '2.2') < 0:
-  put_shared_lib_to_hdfs_cmd = format("hadoop --config {hadoop_conf_dir} dfs -put {oozie_shared_lib} {oozie_hdfs_user_dir}")
-# for newer
-else:
-  put_shared_lib_to_hdfs_cmd = format("{oozie_setup_sh} sharelib create -fs {fs_root} -locallib {oozie_shared_lib}")
-  
-jdbc_driver_name = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.driver", "")
-
-if jdbc_driver_name == "com.mysql.jdbc.Driver":
-  jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
-elif jdbc_driver_name == "org.postgresql.Driver":
-  jdbc_driver_jar = format("{oozie_home}/libserver/postgresql-9.0-801.jdbc4.jar")
-elif jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
-  jdbc_driver_jar = "/usr/share/java/ojdbc6.jar"
-else:
-  jdbc_driver_jar = ""
-
-hostname = config["hostname"]
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
-has_falcon_host = len(falcon_host) > 0
-
-#oozie-log4j.properties
-if (('oozie-log4j' in config['configurations']) and ('content' in config['configurations']['oozie-log4j'])):
-  log4j_props = config['configurations']['oozie-log4j']['content']
-else:
-  log4j_props = None
-
-oozie_hdfs_user_mode = 0775
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-import functools
-#create partial functions with common arguments for every HdfsDirectory call
-#to create hdfs directory we need to call params.HdfsDirectory in code
-HdfsDirectory = functools.partial(
-  HdfsDirectory,
-  conf_dir=hadoop_conf_dir,
-  hdfs_user=hdfs_user,
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  bin_dir = hadoop_bin_dir
-)
-
-#LZO support
-
-io_compression_codecs = config['configurations']['core-site']['io.compression.codecs']
-lzo_enabled = "com.hadoop.compression.lzo" in io_compression_codecs
-# HDP 2.2+ package names embed the stack version, e.g. hadooplzo_2_2_*
-underscored_version = stack_version_unformatted.replace('.', '_')
-dashed_version = stack_version_unformatted.replace('.', '-')
-lzo_packages_to_family = {
-  "any": ["hadoop-lzo"],
-  "redhat": ["lzo", "hadoop-lzo-native"],
-  "suse": ["lzo", "hadoop-lzo-native"],
-  "ubuntu": ["liblzo2-2"]
-}
-
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
-  lzo_packages_to_family["redhat"] += [format("hadooplzo_{underscorred_version}_*")]
-  lzo_packages_to_family["suse"] += [format("hadooplzo_{underscorred_version}_*")]
-  lzo_packages_to_family["ubuntu"] += [format("hadooplzo_{dashed_version}_*")]
-
-lzo_packages_for_current_host = lzo_packages_to_family['any'] + lzo_packages_to_family[System.get_instance().os_family]
-all_lzo_packages = set(itertools.chain(*lzo_packages_to_family.values()))
-
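
Aside, for readers following the refactor: the version gate above is the one behavioral fork in this params.py -- stacks in the [2.0, 2.2) range push the exploded sharelib into HDFS with "hadoop dfs -put", while 2.2 and newer delegate to oozie-setup.sh. A minimal standalone sketch of the same branching, using plain tuple comparison instead of compare_versions (the oozie-setup.sh path and fs_root default are illustrative assumptions, not taken from this patch):

  # Hypothetical standalone sketch; mirrors the branching in params.py above.
  def sharelib_create_cmd(hdp_stack_version,
                          oozie_setup_sh="/usr/lib/oozie/bin/oozie-setup.sh",  # assumed path
                          hadoop_conf_dir="/etc/hadoop/conf",
                          oozie_shared_lib="/usr/lib/oozie/share",
                          oozie_hdfs_user_dir="/user/oozie",
                          fs_root="hdfs://namenode:8020"):                     # assumed default
      # Compare dotted versions as integer tuples, e.g. "2.0.6" -> (2, 0, 6).
      version = tuple(int(p) for p in hdp_stack_version.split("."))
      if (2, 0) <= version < (2, 2):
          # Older stacks copy the local sharelib directory straight into HDFS.
          return "hadoop --config %s dfs -put %s %s" % (
              hadoop_conf_dir, oozie_shared_lib, oozie_hdfs_user_dir)
      # HDP 2.2 and newer let oozie-setup.sh create the sharelib.
      return "%s sharelib create -fs %s -locallib %s" % (
          oozie_setup_sh, fs_root, oozie_shared_lib)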

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/service_check.py
deleted file mode 100644
index 40f8b8d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/service_check.py
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-class OozieServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    
-    # on HDP1 this file is different
-    smoke_test_file_name = 'oozieSmoke2.sh'
-
-    oozie_smoke_shell_file( smoke_test_file_name)
-  
-def oozie_smoke_shell_file(
-  file_name
-):
-  import params
-
-  File( format("{tmp_dir}/{file_name}"),
-    content = StaticFile(file_name),
-    mode = 0755
-  )
-  
-  os_family = System.get_instance().os_family
-  
-  if params.security_enabled:
-    sh_cmd = format("{tmp_dir}/{file_name} {os_family} {conf_dir} {oozie_bin_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {security_enabled} {smokeuser_keytab} {kinit_path_local}")
-  else:
-    sh_cmd = format("{tmp_dir}/{file_name} {os_family} {conf_dir} {oozie_bin_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {security_enabled}")
-
-  Execute( format("{tmp_dir}/{file_name}"),
-    command   = sh_cmd,
-    path      = params.execute_path,
-    tries     = 3,
-    try_sleep = 5,
-    logoutput = True
-  )
-    
-if __name__ == "__main__":
-  OozieServiceCheck().execute()
-  
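
For readers unfamiliar with the resource_management harness: the Execute resource above retries the smoke script up to three times with a five-second sleep between attempts and echoes its output. A rough standalone model of that retry contract (plain subprocess, not the real resource):

  import subprocess
  import time

  def run_with_retries(cmd, tries=3, try_sleep=5):
      # Re-run a shell command until it exits 0 or the attempts run out,
      # roughly what Execute(..., tries=3, try_sleep=5) does for the smoke test.
      for attempt in range(1, tries + 1):
          if subprocess.call(cmd, shell=True) == 0:
              return
          if attempt < tries:
              time.sleep(try_sleep)
      raise RuntimeError("command failed after %d tries: %s" % (tries, cmd))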

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/status_params.py
deleted file mode 100644
index a665449..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/status_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-oozie_pid_dir = config['configurations']['oozie-env']['oozie_pid_dir']
-pid_file = format("{oozie_pid_dir}/oozie.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/templates/adminusers.txt.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/templates/adminusers.txt.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/templates/adminusers.txt.j2
deleted file mode 100644
index 9feae39..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/templates/adminusers.txt.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Users should be set using the following rules:
-#
-#     One user name per line
-#     Empty lines and lines starting with '#' are ignored
-
-{{oozie_user}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/templates/oozie-log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/templates/oozie-log4j.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/templates/oozie-log4j.properties.j2
deleted file mode 100644
index 8c9f25e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/templates/oozie-log4j.properties.j2
+++ /dev/null
@@ -1,92 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
-# XLogService sets its value to '${oozie.home}/logs'
-
-log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH
-log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
-log4j.appender.oozie.Append=true
-log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n
-
-log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
-log4j.appender.oozieops.Append=true
-log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
-log4j.appender.oozieinstrumentation.Append=true
-log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
-log4j.appender.oozieaudit.Append=true
-log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
-log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
-log4j.appender.openjpa.Append=true
-log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
-log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.logger.openjpa=INFO, openjpa
-log4j.logger.oozieops=INFO, oozieops
-log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
-log4j.logger.oozieaudit=ALL, oozieaudit
-log4j.logger.org.apache.oozie=INFO, oozie
-log4j.logger.org.apache.hadoop=WARN, oozie
-log4j.logger.org.mortbay=WARN, oozie
-log4j.logger.org.hsqldb=WARN, oozie
-log4j.logger.org.apache.hadoop.security.authentication.server=INFO, oozie

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hcat_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hcat_client.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hcat_client.py
index fb4f9c9..6c9752a 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hcat_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hcat_client.py
@@ -21,12 +21,16 @@ from mock.mock import MagicMock, call, patch
 from stacks.utils.RMFTestCase import *
 
 class TestHcatClient(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "HIVE/0.12.0.2.0/package"
+  STACK_VERSION = "2.0.6"
 
   def test_configure_default(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/hcat_client.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hcat_client.py",
                        classname = "HCatClient",
                        command = "configure",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/etc/hive/conf',
                               owner = 'hcat',
@@ -60,10 +64,12 @@ class TestHcatClient(RMFTestCase):
 
 
   def test_configure_secured(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/hcat_client.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hcat_client.py",
                          classname = "HCatClient",
                          command = "configure",
-                         config_file="secured.json"
+                         config_file="secured.json",
+                         hdp_stack_version = self.STACK_VERSION,
+                         target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/etc/hive/conf',
                               recursive = True,
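
The change above is the template for every test diff in this series: the script path is rebased from the per-stack tree onto COMMON_SERVICES_PACKAGE_DIR, and the new hdp_stack_version and target keyword arguments tell RMFTestCase to resolve scripts under common-services instead of the stack. A tiny runnable illustration of just the path rebase (values copied from this diff):

  # Old path, rooted at the stack; new path, rooted at common-services.
  COMMON_SERVICES_PACKAGE_DIR = "HIVE/0.12.0.2.0/package"
  old = "2.0.6/services/HIVE/package/scripts/hcat_client.py"
  new = COMMON_SERVICES_PACKAGE_DIR + "/scripts/hcat_client.py"
  assert new == "HIVE/0.12.0.2.0/package/scripts/hcat_client.py"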

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py
index 2a20225..0e2fc9e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py
@@ -21,12 +21,16 @@ from mock.mock import MagicMock, call, patch
 from stacks.utils.RMFTestCase import *
 
 class TestHiveClient(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "HIVE/0.12.0.2.0/package"
+  STACK_VERSION = "2.0.6"
 
   def test_configure_default(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/hive_client.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_client.py",
                        classname = "HiveClient",
                        command = "configure",
-                       config_file="default_client.json"
+                       config_file="default_client.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/etc/hive',
         mode = 0755
@@ -119,10 +123,12 @@ class TestHiveClient(RMFTestCase):
 
 
   def test_configure_secured(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/hive_client.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_client.py",
                        classname = "HiveClient",
                        command = "configure",
-                       config_file="secured_client.json"
+                       config_file="secured_client.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/etc/hive',
         mode = 0755

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_metastore.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_metastore.py
index ac07975..853ef3e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_metastore.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_metastore.py
@@ -22,20 +22,26 @@ from mock.mock import MagicMock, call, patch
 from stacks.utils.RMFTestCase import *
 
 class TestHiveMetastore(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "HIVE/0.12.0.2.0/package"
+  STACK_VERSION = "2.0.6"
 
   def test_configure_default(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/hive_metastore.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_metastore.py",
                        classname = "HiveMetastore",
                        command = "configure",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
 
   def test_start_default(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/hive_metastore.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_metastore.py",
                        classname = "HiveMetastore",
                        command = "start",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assert_configure_default()
@@ -53,10 +59,12 @@ class TestHiveMetastore(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_stop_default(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/hive_metastore.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_metastore.py",
                        classname = "HiveMetastore",
                        command = "stop",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assertResourceCalled('Execute', 'sudo kill `cat /var/run/hive/hive.pid`',
@@ -68,19 +76,23 @@ class TestHiveMetastore(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_configure_secured(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/hive_metastore.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_metastore.py",
                        classname = "HiveMetastore",
                        command = "configure",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
     self.assertNoMoreResources()
 
   def test_start_secured(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/hive_metastore.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_metastore.py",
                        classname = "HiveMetastore",
                        command = "start",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assert_configure_secured()
@@ -98,10 +110,12 @@ class TestHiveMetastore(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_stop_secured(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/hive_metastore.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_metastore.py",
                        classname = "HiveMetastore",
                        command = "stop",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assertResourceCalled('Execute', 'sudo kill `cat /var/run/hive/hive.pid`',


[27/37] ambari git commit: AMBARI-8746: Common Services: Refactor HDP 2.0.6 HIVE, OOZIE services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hcatSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hcatSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hcatSmoke.sh
deleted file mode 100644
index d1e2ff1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hcatSmoke.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-export tablename=$1
-
-case "$2" in
-
-prepare)
-  hcat -e "show tables"
-  hcat -e "drop table IF EXISTS ${tablename}"
-  hcat -e "create table ${tablename} ( id INT, name string ) stored as rcfile ;"
-;;
-
-cleanup)
-  hcat -e "drop table IF EXISTS ${tablename}"
-;;
-
-esac
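
hcatSmoke.sh takes a table name and a phase ("prepare" or "cleanup") matching the case statement above; the hcat service check later in this series runs prepare, verifies the table's warehouse directory in HDFS, then runs cleanup. A hypothetical standalone driver, for illustration only:

  import subprocess

  def run_hcat_smoke(tmp_dir, unique, phase):
      # phase must match one of the case branches above.
      assert phase in ("prepare", "cleanup")
      subprocess.check_call(["%s/hcatSmoke.sh" % tmp_dir,
                             "hcatsmoke%s" % unique, phase])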

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveSmoke.sh
deleted file mode 100644
index f9f2020..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveSmoke.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-export tablename=$1
-echo "CREATE EXTERNAL TABLE IF NOT EXISTS ${tablename} ( foo INT, bar STRING );" | hive
-echo "DESCRIBE ${tablename};" | hive

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveserver2.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveserver2.sql b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveserver2.sql
deleted file mode 100644
index 99a3865..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveserver2.sql
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-CREATE EXTERNAL TABLE IF NOT EXISTS hiveserver2smoke20408 ( foo INT, bar STRING );
-DESCRIBE hiveserver2smoke20408;

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveserver2Smoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveserver2Smoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveserver2Smoke.sh
deleted file mode 100644
index 77d7b3e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/hiveserver2Smoke.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-smokeout=`/usr/lib/hive/bin/beeline -u $1 -n fakeuser -p fakepwd -d org.apache.hive.jdbc.HiveDriver -e "!run $2" 2>&1 | grep Error`
-
-if [ "x$smokeout" == "x" ]; then
-  echo "Smoke test of hiveserver2 passed"
-  exit 0
-else
-  echo "Smoke test of hiveserver2 wasnt passed"
-  echo $smokeout
-  exit 1
-fi
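
The pass/fail decision above is purely textual: beeline's combined output is grepped for "Error", and an empty match counts as success. The same check sketched in Python, assuming subprocess and the stock beeline location:

  import subprocess

  def hiveserver2_smoke(jdbc_url, sql_file,
                        beeline="/usr/lib/hive/bin/beeline"):
      proc = subprocess.Popen(
          [beeline, "-u", jdbc_url, "-n", "fakeuser", "-p", "fakepwd",
           "-d", "org.apache.hive.jdbc.HiveDriver", "-e", "!run %s" % sql_file],
          stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
      out, _ = proc.communicate()
      # Mirror the shell test: any "Error" in the output fails the smoke.
      return b"Error" not in out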

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/pigSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/pigSmoke.sh
deleted file mode 100644
index 2e90ac0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/pigSmoke.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License
-
-A = load 'passwd' using PigStorage(':');
-B = foreach A generate \$0 as id;
-store B into 'pigsmoke.out';

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/removeMysqlUser.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/removeMysqlUser.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/removeMysqlUser.sh
deleted file mode 100644
index b035517..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/removeMysqlUser.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-mysqldservice=$1
-mysqldbuser=$2
-userhost=$3
-myhostname=$(hostname -f)
-sudo_prefix="sudo -H -E"
-
-$sudo_prefix service $mysqldservice start
-echo "Removing user $mysqldbuser@$userhost"
-sudo su mysql -s /bin/bash - -c "mysql -u root -e \"DROP USER '$mysqldbuser'@'$userhost';\""
-sudo su mysql -s /bin/bash - -c "mysql -u root -e \"flush privileges;\""
-$sudo_prefix service $mysqldservice stop

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/startMetastore.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/startMetastore.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/startMetastore.sh
deleted file mode 100644
index da0f60b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/startMetastore.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-HIVE_CONF_DIR=$4 hive --service metastore -hiveconf hive.log.file=hivemetastore.log -hiveconf hive.log.dir=$5 > $1 2> $2 &
-echo $!|cat>$3

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/templetonSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/templetonSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/templetonSmoke.sh
deleted file mode 100644
index 22202ee..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/templetonSmoke.sh
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export ttonhost=$1
-export smoke_test_user=$2
-export smoke_user_keytab=$3
-export security_enabled=$4
-export kinit_path_local=$5
-export ttonurl="http://${ttonhost}:50111/templeton/v1"
-
-if [[ $security_enabled == "true" ]]; then
-  kinitcmd="${kinit_path_local}  -kt ${smoke_user_keytab} ${smoke_test_user}; "
-else
-  kinitcmd=""
-fi
-
-export no_proxy=$ttonhost
-cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>'    $ttonurl/status 2>&1"
-retVal=`sudo su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (status cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit 1
-fi
-
-exit 0
-
-#try hcat ddl command
-echo "user.name=${smoke_test_user}&exec=show databases;" /tmp/show_db.post.txt
-cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>' -d  \@${destdir}/show_db.post.txt  $ttonurl/ddl 2>&1"
-retVal=`sudo su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (ddl cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit  1
-fi
-
-# Pig smoke via Templeton is skipped when security is enabled
-if [[ $security_enabled == "true" ]]; then
-  echo "Templeton Pig Smoke Tests not run in secure mode"
-  exit 0
-fi
-
-#try pig query
-outname=${smoke_test_user}.`date +"%M%d%y"`.$$;
-ttonTestOutput="/tmp/idtest.${outname}.out";
-ttonTestInput="/tmp/idtest.${outname}.in";
-ttonTestScript="idtest.${outname}.pig"
-
-echo "A = load '$ttonTestInput' using PigStorage(':');"  > /tmp/$ttonTestScript
-echo "B = foreach A generate \$0 as id; " >> /tmp/$ttonTestScript
-echo "store B into '$ttonTestOutput';" >> /tmp/$ttonTestScript
-
-#copy pig script to hdfs
-sudo su ${smoke_test_user} -s /bin/bash - -c "hadoop dfs -copyFromLocal /tmp/$ttonTestScript /tmp/$ttonTestScript"
-
-#copy input file to hdfs
-sudo su ${smoke_test_user} -s /bin/bash - -c "hadoop dfs -copyFromLocal /etc/passwd $ttonTestInput"
-
-#create, copy post args file
-echo -n "user.name=${smoke_test_user}&file=/tmp/$ttonTestScript" > /tmp/pig_post.txt
-
-#submit pig query
-cmd="curl -s -w 'http_code <%{http_code}>' -d  \@${destdir}/pig_post.txt  $ttonurl/pig 2>&1"
-retVal=`sudo su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (pig cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit 1
-fi
-
-exit 0
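
The curl idiom above appends an "http_code <NNN>" marker to the response and scrapes it back out with sed; anything other than 200 fails the check. The same status probe sketched with the requests library (an assumption -- the real script shells out to curl so it can kinit as the smoke user first):

  import requests

  def templeton_status_ok(ttonhost, timeout=30):
      # Kerberos negotiation is omitted here; the shell version prefixes kinit.
      url = "http://%s:50111/templeton/v1/status" % ttonhost
      try:
          return requests.get(url, timeout=timeout).status_code == 200
      except requests.RequestException:
          return False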

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/__init__.py
deleted file mode 100644
index 5561e10..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat.py
deleted file mode 100644
index 31c1673..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import sys
-
-
-def hcat():
-  import params
-
-  Directory(params.hive_conf_dir,
-            recursive=True,
-            owner=params.hcat_user,
-            group=params.user_group,
-  )
-
-
-  Directory(params.hcat_conf_dir,
-            recursive=True,
-            owner=params.hcat_user,
-            group=params.user_group,
-  )
-
-  Directory(params.hcat_pid_dir,
-            owner=params.webhcat_user,
-            recursive=True
-  )
-
-  XmlConfig("hive-site.xml",
-            conf_dir=params.hive_client_conf_dir,
-            configurations=params.config['configurations']['hive-site'],
-            configuration_attributes=params.config['configuration_attributes']['hive-site'],
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=0644)
-
-  File(format("{hcat_conf_dir}/hcat-env.sh"),
-       owner=params.hcat_user,
-       group=params.user_group,
-       content=InlineTemplate(params.hcat_env_sh_template)
-  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_client.py
deleted file mode 100644
index 8b5921a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_client.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from hcat import hcat
-
-class HCatClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-
-    env.set_params(params)
-
-    hcat()
-
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-
-if __name__ == "__main__":
-  HCatClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_service_check.py
deleted file mode 100644
index fd4c6ca..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_service_check.py
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.libraries.functions import get_unique_id_and_date
-
-def hcat_service_check():
-    import params
-    unique = get_unique_id_and_date()
-    output_file = format("/apps/hive/warehouse/hcatsmoke{unique}")
-    test_cmd = format("fs -test -e {output_file}")
-
-    if params.security_enabled:
-      kinit_cmd = format(
-        "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser}; ")
-    else:
-      kinit_cmd = ""
-
-    File(format("{tmp_dir}/hcatSmoke.sh"),
-         content=StaticFile("hcatSmoke.sh"),
-         mode=0755
-    )
-
-    prepare_cmd = format("{kinit_cmd}env JAVA_HOME={java64_home} {tmp_dir}/hcatSmoke.sh hcatsmoke{unique} prepare")
-
-    Execute(prepare_cmd,
-            tries=3,
-            user=params.smokeuser,
-            try_sleep=5,
-            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin', params.execute_path],
-            logoutput=True)
-
-    if params.security_enabled:
-      ExecuteHadoop(test_cmd,
-                    user=params.hdfs_user,
-                    logoutput=True,
-                    conf_dir=params.hadoop_conf_dir,
-                    security_enabled=params.security_enabled,
-                    kinit_path_local=params.kinit_path_local,
-                    keytab=params.hdfs_user_keytab,
-                    principal=params.hdfs_principal_name,
-                    bin_dir=params.execute_path
-      )
-    else:
-      ExecuteHadoop(test_cmd,
-                    user=params.hdfs_user,
-                    logoutput=True,
-                    conf_dir=params.hadoop_conf_dir,
-                    security_enabled=params.security_enabled,
-                    kinit_path_local=params.kinit_path_local,
-                    keytab=params.hdfs_user_keytab,
-                    bin_dir=params.execute_path
-      )
-
-    cleanup_cmd = format("{kinit_cmd} {tmp_dir}/hcatSmoke.sh hcatsmoke{unique} cleanup")
-
-    Execute(cleanup_cmd,
-            tries=3,
-            user=params.smokeuser,
-            try_sleep=5,
-            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin', params.execute_path],
-            logoutput=True
-    )
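
The two ExecuteHadoop branches above differ only in whether principal is passed. One possible consolidation, sketched as a drop-in for that if/else and assuming the script's existing imports:

  kwargs = dict(
      user=params.hdfs_user,
      logoutput=True,
      conf_dir=params.hadoop_conf_dir,
      security_enabled=params.security_enabled,
      kinit_path_local=params.kinit_path_local,
      keytab=params.hdfs_user_keytab,
      bin_dir=params.execute_path,
  )
  if params.security_enabled:
      kwargs["principal"] = params.hdfs_principal_name
  ExecuteHadoop(test_cmd, **kwargs)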

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py
deleted file mode 100644
index 2dbdd6f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py
+++ /dev/null
@@ -1,232 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import sys
-import os
-
-
-def hive(name=None):
-  import params
-
-  if name == 'hiveserver2':
-
-    params.HdfsDirectory(params.hive_apps_whs_dir,
-                         action="create_delayed",
-                         owner=params.hive_user,
-                         mode=0777
-    )
-    params.HdfsDirectory(params.hive_hdfs_user_dir,
-                         action="create_delayed",
-                         owner=params.hive_user,
-                         mode=params.hive_hdfs_user_mode
-    )
-    params.HdfsDirectory(None, action="create")
-
-  Directory(params.hive_conf_dir_prefix,
-            mode=0755
-  )
-
-  # We should change configurations for client as well as for server.
-  # The reason is that stale-configs are service-level, not component.
-  for conf_dir in params.hive_conf_dirs_list:
-    fill_conf_dir(conf_dir)
-
-  XmlConfig("hive-site.xml",
-            conf_dir=params.hive_config_dir,
-            configurations=params.config['configurations']['hive-site'],
-            configuration_attributes=params.config['configuration_attributes']['hive-site'],
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=0644)
-  
-  if params.hive_specific_configs_supported and name == 'hiveserver2':
-    XmlConfig("hiveserver2-site.xml",
-              conf_dir=params.hive_server_conf_dir,
-              configurations=params.config['configurations']['hiveserver2-site'],
-              configuration_attributes=params.config['configuration_attributes']['hiveserver2-site'],
-              owner=params.hive_user,
-              group=params.user_group,
-              mode=0644)
-  
-  File(format("{hive_config_dir}/hive-env.sh"),
-       owner=params.hive_user,
-       group=params.user_group,
-       content=InlineTemplate(params.hive_env_sh_template)
-  )
-
-  if name == 'metastore' or name == 'hiveserver2':
-    jdbc_connector()
-    
-  environment = {
-    "no_proxy": format("{ambari_server_hostname}")
-  }
-
-  cmd = format("/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf -x \"\" "
-               "--retry 5 "
-               "{jdk_location}{check_db_connection_jar_name} "
-               "-o {check_db_connection_jar_name}'")
-
-  Execute(cmd,
-          not_if=format("[ -f {check_db_connection_jar} ]"),
-          environment = environment)
-
-  if name == 'metastore':
-    File(params.start_metastore_path,
-         mode=0755,
-         content=StaticFile('startMetastore.sh')
-    )
-    if params.init_metastore_schema:
-      create_schema_cmd = format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
-                                 "{hive_bin}/schematool -initSchema "
-                                 "-dbType {hive_metastore_db_type} "
-                                 "-userName {hive_metastore_user_name} "
-                                 "-passWord {hive_metastore_user_passwd!p}")
-
-      check_schema_created_cmd = format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
-                                        "{hive_bin}/schematool -info "
-                                        "-dbType {hive_metastore_db_type} "
-                                        "-userName {hive_metastore_user_name} "
-                                        "-passWord {hive_metastore_user_passwd!p}")
-
-      Execute(create_schema_cmd,
-              not_if = check_schema_created_cmd
-      )
-  elif name == 'hiveserver2':
-    File(params.start_hiveserver2_path,
-         mode=0755,
-         content=Template(format('{start_hiveserver2_script}'))
-    )
-
-  if name != "client":
-    crt_directory(params.hive_pid_dir)
-    crt_directory(params.hive_log_dir)
-    crt_directory(params.hive_var_lib)
-
-def fill_conf_dir(component_conf_dir):
-  import params
-  
-  Directory(component_conf_dir,
-            owner=params.hive_user,
-            group=params.user_group,
-            recursive=True
-  )
-
-  XmlConfig("mapred-site.xml",
-            conf_dir=component_conf_dir,
-            configurations=params.config['configurations']['mapred-site'],
-            configuration_attributes=params.config['configuration_attributes']['mapred-site'],
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=0644)
-
-
-  crt_file(format("{component_conf_dir}/hive-default.xml.template"))
-  crt_file(format("{component_conf_dir}/hive-env.sh.template"))
-
-  log4j_exec_filename = 'hive-exec-log4j.properties'
-  if (params.log4j_exec_props != None):
-    File(format("{component_conf_dir}/{log4j_exec_filename}"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.hive_user,
-         content=params.log4j_exec_props
-    )
-  elif os.path.exists(format("{component_conf_dir}/{log4j_exec_filename}.template")):
-    File(format("{component_conf_dir}/{log4j_exec_filename}"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.hive_user,
-         content=StaticFile(format("{component_conf_dir}/{log4j_exec_filename}.template"))
-    )
-
-  log4j_filename = 'hive-log4j.properties'
-  if (params.log4j_props != None):
-    File(format("{component_conf_dir}/{log4j_filename}"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.hive_user,
-         content=params.log4j_props
-    )
-  elif os.path.exists(format("{component_conf_dir}/{log4j_filename}.template")):
-    File(format("{component_conf_dir}/{log4j_filename}"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.hive_user,
-         content=StaticFile(format("{component_conf_dir}/{log4j_filename}.template"))
-    )
-
-
-def crt_directory(name):
-  import params
-
-  Directory(name,
-            recursive=True,
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=0755)
-
-
-def crt_file(name):
-  import params
-
-  File(name,
-       owner=params.hive_user,
-       group=params.user_group
-  )
-
-
-def jdbc_connector():
-  import params
-
-  if params.hive_jdbc_driver == "com.mysql.jdbc.Driver":
-    File(params.target,
-         action="delete",
-    )
-    Execute(('cp', format('/usr/share/java/{jdbc_jar_name}'), params.target),
-            not_if=format("test -f {target}"),
-            creates=params.target,
-            path=["/bin", "/usr/bin/"],
-            sudo=True
-    )
-  elif params.hive_jdbc_driver == "org.postgresql.Driver":
-    cmd = format("mkdir -p {artifact_dir} ; rm -f {target} ; cp /usr/share/java/{jdbc_jar_name} {target}")
-
-    Execute(cmd,
-            not_if=format("test -f {target}"),
-            creates=params.target,
-            environment= {'PATH' : params.execute_path },
-            path=["/bin", "usr/bin/"])
-
-  elif params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
-    environment = {
-      "no_proxy": format("{ambari_server_hostname}")
-    }
-
-    cmd = format(
-      "mkdir -p {artifact_dir} ; "
-      "curl -kf -x \"\" --retry 10 {driver_curl_source} -o {driver_curl_target} &&  "
-      "rm -f {target} ; "
-      "cp {driver_curl_target} {target}")
-
-    Execute(cmd,
-            not_if=format("test -f {target}"),
-            path=["/bin", "/usr/bin/"],
-            environment=environment)
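
A recurring idiom in hive.py is guarding shell work with not_if and creates so that re-running configure is a no-op. A minimal standalone model of that contract, under plain subprocess semantics:

  import os
  import subprocess

  def guarded_execute(cmd, not_if=None, creates=None):
      # Skip when the guard command already succeeds or the target file exists,
      # which is roughly how Execute's not_if/creates guards behave.
      if creates and os.path.exists(creates):
          return
      if not_if and subprocess.call(not_if, shell=True) == 0:
          return
      subprocess.check_call(cmd, shell=True)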

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_client.py
deleted file mode 100644
index 499f632..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_client.py
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import sys
-from resource_management import *
-
-from hive import hive
-
-class HiveClient(Script):
-  def install(self, env):
-    import params
-    self.install_packages(env, exclude_packages=params.hive_exclude_packages)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hive(name='client')
-
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  HiveClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_metastore.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_metastore.py
deleted file mode 100644
index dc02a7d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_metastore.py
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hive import hive
-from hive_service import hive_service
-from mysql_service import mysql_service
-
-class HiveMetastore(Script):
-
-  def install(self, env):
-    import params
-    self.install_packages(env, exclude_packages=params.hive_exclude_packages)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hive(name='metastore')
-
-  def start(self, env, rolling_restart=False):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    hive_service( 'metastore',
-                   action = 'start'
-    )
-
-  def stop(self, env, rolling_restart=False):
-    import params
-    env.set_params(params)
-
-    hive_service( 'metastore',
-                   action = 'stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
-    # Verify the metastore pid file points at a live process
-    check_process_status(pid_file)
-
-if __name__ == "__main__":
-  HiveMetastore().execute()

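check_process_status in the status() method above comes from resource_management; in essence it reads the pid file and probes the recorded pid. A rough stand-alone equivalent of that check (pid_file_alive is a hypothetical name):

import os

def pid_file_alive(pid_file):
    # Read the recorded pid and probe it with signal 0, which tests
    # process existence without actually delivering a signal.
    try:
        with open(pid_file) as f:
            pid = int(f.read().strip())
        os.kill(pid, 0)
        return True
    except (IOError, OSError, ValueError):
        return False
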
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_server.py
deleted file mode 100644
index fa8ece4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_server.py
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hive import hive
-from hive_service import hive_service
-from resource_management.libraries.functions.dynamic_variable_interpretation import copy_tarballs_to_hdfs
-from install_jars import install_tez_jars
-
-class HiveServer(Script):
-
-  def install(self, env):
-    import params
-    self.install_packages(env, exclude_packages=params.hive_exclude_packages)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    if not (params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >=0):
-      install_tez_jars()
-    hive(name='hiveserver2')
-
-  def start(self, env, rolling_restart=False):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-
-    # This function is needed in HDP 2.2, but it is safe to call in earlier versions.
-    copy_tarballs_to_hdfs('mapreduce', params.tez_user, params.hdfs_user, params.user_group)
-    copy_tarballs_to_hdfs('tez', params.tez_user, params.hdfs_user, params.user_group)
-
-    hive_service( 'hiveserver2',
-                  action = 'start'
-    )
-
-  def stop(self, env, rolling_restart=False):
-    import params
-    env.set_params(params)
-
-    hive_service( 'hiveserver2',
-                  action = 'stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{hive_pid_dir}/{hive_pid}")
-    # Verify the HiveServer2 pid file points at a live process
-    check_process_status(pid_file)
-
-
-if __name__ == "__main__":
-  HiveServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_service.py
deleted file mode 100644
index 8e5d878..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_service.py
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import sys
-import time
-from resource_management.core import shell
-
-def hive_service(
-    name,
-    action='start'):
-
-  import params
-
-  if name == 'metastore':
-    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
-    cmd = format(
-      "env HADOOP_HOME={hadoop_home} JAVA_HOME={java64_home} {start_metastore_path} {hive_log_dir}/hive.out {hive_log_dir}/hive.log {pid_file} {hive_server_conf_dir} {hive_log_dir}")
-  elif name == 'hiveserver2':
-    pid_file = format("{hive_pid_dir}/{hive_pid}")
-    cmd = format(
-      "env JAVA_HOME={java64_home} {start_hiveserver2_path} {hive_log_dir}/hive-server2.out {hive_log_dir}/hive-server2.log {pid_file} {hive_server_conf_dir} {hive_log_dir}")
-
-  process_id_exists = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
-  
-  if action == 'start':
-    if name == 'hiveserver2':
-      check_fs_root()
-
-    demon_cmd = format("{cmd}")
-    
-    Execute(daemon_cmd,
-            user=params.hive_user,
-            environment={'HADOOP_HOME': params.hadoop_home},
-            path=params.execute_path,
-            not_if=process_id_exists
-    )
-
-    if params.hive_jdbc_driver == "com.mysql.jdbc.Driver" or \
-       params.hive_jdbc_driver == "org.postgresql.Driver" or \
-       params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
-      
-      db_connection_check_command = format(
-        "{java64_home}/bin/java -cp {check_db_connection_jar}:/usr/share/java/{jdbc_jar_name} org.apache.ambari.server.DBConnectionVerification '{hive_jdbc_connection_url}' {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_jdbc_driver}")
-      
-      Execute(db_connection_check_command,
-              path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin', tries=5, try_sleep=10)
-      
-    # AMBARI-5800 - wait for the server to come up instead of just the PID existence
-    if name == 'hiveserver2':
-      SOCKET_WAIT_SECONDS = 120
-      address=params.hive_server_host
-      port=int(params.hive_server_port)
-      
-      start_time = time.time()
-      end_time = start_time + SOCKET_WAIT_SECONDS
-
-      is_service_socket_valid = False
-      print "Waiting for the Hive server to start..."
-      if params.security_enabled:
-        kinitcmd=format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser}; ")
-      else:
-        kinitcmd=None
-      while time.time() < end_time:
-        try:
-          check_thrift_port_sasl(address, port, params.hive_server2_authentication,
-                                 params.hive_server_principal, kinitcmd, params.smokeuser)
-          is_service_socket_valid = True
-          break
-        except Exception, e:
-          time.sleep(5)
-
-      elapsed_time = time.time() - start_time
-      
-      if not is_service_socket_valid:
-        raise Fail("Connection to Hive server %s on port %s failed after %d seconds" % (address, port, elapsed_time))
-      
-      print "Successfully connected to Hive at %s on port %s after %d seconds" % (address, port, elapsed_time)    
-            
-  elif action == 'stop':
-    demon_cmd = format("sudo kill `cat {pid_file}`")
-    Execute(demon_cmd,
-         not_if = format("! ({process_id_exists})")
-    )
-    File(pid_file,
-         action = "delete",
-    )
-
-def check_fs_root():
-  import params  
-  fs_root_url = format("{fs_root}{hive_apps_whs_dir}")
-  cmd = format("metatool -listFSRoot 2>/dev/null | grep hdfs:// | grep -v '.db$'")
-  code, out = shell.call(cmd, user=params.hive_user)
-  if code == 0 and fs_root_url.strip() != out.strip():
-    cmd = format("metatool -updateLocation {fs_root}{hive_apps_whs_dir} {out}")
-    Execute(cmd,
-            environment= {'PATH' : params.execute_path },
-            user=params.hive_user)

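The AMBARI-5800 wait loop above is a bounded retry against the HiveServer2 thrift port, with check_thrift_port_sasl doing a SASL-aware probe. Stripped of the SASL handling, the shape of the loop is just the following sketch (a plain TCP probe, not a drop-in replacement for the real check):

import socket
import time

def wait_for_port(host, port, timeout_seconds=120, poll_seconds=5):
    deadline = time.time() + timeout_seconds
    while time.time() < deadline:
        try:
            # Plain TCP connect; the real check also drives the SASL handshake.
            socket.create_connection((host, port), timeout=poll_seconds).close()
            return True
        except socket.error:
            time.sleep(poll_seconds)
    return False
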
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/install_jars.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/install_jars.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/install_jars.py
deleted file mode 100644
index a18ca72..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/install_jars.py
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import os
-
-def install_tez_jars():
-  import params
-
-  destination_hdfs_dirs = get_tez_hdfs_dir_paths(params.tez_lib_uris)
-
-  # If tez libraries are to be stored in hdfs
-  if destination_hdfs_dirs:
-    for hdfs_dir in destination_hdfs_dirs:
-      params.HdfsDirectory(hdfs_dir,
-                           action="create_delayed",
-                           owner=params.tez_user,
-                           mode=0755
-      )
-    pass
-    params.HdfsDirectory(None, action="create")
-
-    if params.security_enabled:
-      kinit_if_needed = format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name};")
-    else:
-      kinit_if_needed = ""
-
-    if kinit_if_needed:
-      Execute(kinit_if_needed,
-              user=params.tez_user,
-              path='/bin'
-      )
-    pass
-
-    app_dir_path = None
-    lib_dir_path = None
-
-    if len(destination_hdfs_dirs) > 0:
-      for path in destination_hdfs_dirs:
-        if 'lib' in path:
-          lib_dir_path = path
-        else:
-          app_dir_path = path
-        pass
-      pass
-    pass
-
-    if app_dir_path:
-      for scr_file, dest_file in params.app_dir_files.iteritems():
-        CopyFromLocal(scr_file,
-                      mode=0755,
-                      owner=params.tez_user,
-                      dest_dir=app_dir_path,
-                      dest_file=dest_file,
-                      kinnit_if_needed=kinit_if_needed,
-                      hdfs_user=params.hdfs_user,
-                      hadoop_bin_dir=params.hadoop_bin_dir,
-                      hadoop_conf_dir=params.hadoop_conf_dir
-        )
-
-    if lib_dir_path:
-      CopyFromLocal(params.tez_local_lib_jars,
-                    mode=0755,
-                    owner=params.tez_user,
-                    dest_dir=lib_dir_path,
-                    kinnit_if_needed=kinit_if_needed,
-                    hdfs_user=params.hdfs_user,
-                    hadoop_bin_dir=params.hadoop_bin_dir,
-                    hadoop_conf_dir=params.hadoop_conf_dir
-      )
-    pass
-
-
-def get_tez_hdfs_dir_paths(tez_lib_uris = None):
-  hdfs_path_prefix = 'hdfs://'
-  lib_dir_paths = []
-  if tez_lib_uris and tez_lib_uris.strip().find(hdfs_path_prefix, 0) != -1:
-    dir_paths = tez_lib_uris.split(',')
-    for path in dir_paths:
-      if not "tez.tar.gz" in path:
-        lib_dir_path = path.replace(hdfs_path_prefix, '')
-        lib_dir_path = lib_dir_path if lib_dir_path.endswith(os.sep) else lib_dir_path + os.sep
-        lib_dir_paths.append(lib_dir_path)
-      else:
-        lib_dir_path = path.replace(hdfs_path_prefix, '')
-        lib_dir_paths.append(os.path.dirname(lib_dir_path))
-    pass
-  pass
-
-  return lib_dir_paths

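get_tez_hdfs_dir_paths above is pure string handling, so its behavior is easy to pin down with an example. Restated stand-alone, with a hypothetical tez.lib.uris value for illustration:

import os

def tez_hdfs_dir_paths(tez_lib_uris):
    prefix = 'hdfs://'
    dir_paths = []
    if tez_lib_uris and prefix in tez_lib_uris:
        for path in tez_lib_uris.split(','):
            path = path.replace(prefix, '')
            if "tez.tar.gz" in path:
                # Keep the parent directory of the tarball.
                dir_paths.append(os.path.dirname(path))
            else:
                dir_paths.append(path if path.endswith('/') else path + '/')
    return dir_paths

print(tez_hdfs_dir_paths("hdfs:///apps/tez/,hdfs:///apps/tez/lib/"))
# -> ['/apps/tez/', '/apps/tez/lib/']
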
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/mysql_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/mysql_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/mysql_server.py
deleted file mode 100644
index 91a699b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/mysql_server.py
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from mysql_service import mysql_service
-
-
-class MysqlServer(Script):
-  def install(self, env):
-    import params
-    self.install_packages(env, exclude_packages=params.hive_exclude_packages)
-    self.configure(env)
-
-  def clean(self, env):
-    import params, mysql_users
-    env.set_params(params)
-    mysql_users.mysql_deluser(params)
-
-  def configure(self, env):
-    import params, mysql_users
-    env.set_params(params)
-    mysql_users.mysql_adduser(params)
-
-  def start(self, env, rolling_restart=False):
-    import params
-    env.set_params(params)
-    mysql_service(daemon_name=params.daemon_name, action='start')
-
-  def stop(self, env, rolling_restart=False):
-    import params
-    env.set_params(params)
-    mysql_service(daemon_name=params.daemon_name, action='stop')
-
-  def status(self, env):
-    import status_params
-
-    mysql_service(daemon_name=status_params.daemon_name, action='status')
-
-
-if __name__ == "__main__":
-  MysqlServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/mysql_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/mysql_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/mysql_service.py
deleted file mode 100644
index 2f0c6f6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/mysql_service.py
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-def mysql_service(daemon_name=None, action='start'): 
-  status_cmd = format('service {daemon_name} status | grep running')
-  cmd = ('service', daemon_name, action)
-
-  if action == 'status':
-    Execute(status_cmd)
-  elif action == 'stop':
-    import params
-    Execute(cmd,
-            logoutput = True,
-            only_if = status_cmd,
-            sudo = True,
-    )
-  elif action == 'start':
-    import params
-    # required for running hive
-    replace_bind_address = ('sed','-i','s|^bind-address[ \t]*=.*|bind-address = 0.0.0.0|',params.mysql_configname)
-    Execute(replace_bind_address,
-            sudo = True,
-    )
-    
-    Execute(cmd,
-      logoutput = True,
-      not_if = status_cmd,
-      sudo = True,
-    )
-
-
-

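mysql_service above leans on the status_cmd guard to stay idempotent: start only runs when "service X status | grep running" fails, and stop only when it succeeds. The same guard in plain subprocess terms, a sketch assuming a SysV-style service wrapper as the patch does:

import subprocess

def service_is_running(daemon_name):
    # Exit code 0 from "service X status | grep running" means running.
    cmd = "service %s status | grep running" % daemon_name
    return subprocess.call(cmd, shell=True) == 0

def start_service(daemon_name):
    if not service_is_running(daemon_name):  # the not_if guard
        subprocess.check_call(["service", daemon_name, "start"])

def stop_service(daemon_name):
    if service_is_running(daemon_name):      # the only_if guard
        subprocess.check_call(["service", daemon_name, "stop"])
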
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/mysql_users.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/mysql_users.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/mysql_users.py
deleted file mode 100644
index 9961c18..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/mysql_users.py
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-# Used to add hive access to the needed components
-def mysql_adduser(params):
-  File(params.mysql_adduser_path,
-       mode=0755,
-       content=StaticFile('addMysqlUser.sh')
-  )
-  hive_server_host = format("{hive_server_host}")
-  hive_metastore_host = format("{hive_metastore_host}")
-
-  add_metastore_cmd = "bash -x {mysql_adduser_path} {daemon_name} {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_metastore_host}"
-  add_hiveserver_cmd = "bash -x {mysql_adduser_path} {daemon_name} {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_server_host}"
-  if (hive_server_host == hive_metastore_host):
-    cmd = format(add_hiveserver_cmd)
-  else:
-    cmd = format(add_hiveserver_cmd + ";" + add_metastore_cmd)
-  Execute(cmd,
-          tries=3,
-          try_sleep=5,
-          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-  )
-
-# Removes hive access from components
-def mysql_deluser(params):
-  File(params.mysql_deluser_path,
-       mode=0755,
-       content=StaticFile('removeMysqlUser.sh')
-  )
-  hive_server_host = format("{hive_server_host}")
-  hive_metastore_host = format("{hive_metastore_host}")
-
-  del_hiveserver_cmd = "bash -x {mysql_deluser_path} {daemon_name} {hive_metastore_user_name} {hive_server_host}"
-  del_metastore_cmd = "bash -x {mysql_deluser_path} {daemon_name} {hive_metastore_user_name} {hive_metastore_host}"
-  if (hive_server_host == hive_metastore_host):
-    cmd = format(del_hiveserver_cmd)
-  else:
-    cmd = format(
-      del_hiveserver_cmd + ";" + del_metastore_cmd)
-  Execute(cmd,
-          tries=3,
-          try_sleep=5,
-          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-  )
-

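Both helpers above branch the same way: when HiveServer2 and the metastore are colocated, one grant script run is enough; otherwise the script runs once per host. A compact restatement of that branch (adduser_commands is a hypothetical name):

def adduser_commands(script, daemon, user, passwd, server_host, metastore_host):
    base = "bash -x %s %s %s %s" % (script, daemon, user, passwd)
    hosts = [server_host]
    if metastore_host != server_host:
        hosts.append(metastore_host)          # separate grant per host
    return ["%s %s" % (base, host) for host in hosts]
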
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py
deleted file mode 100644
index bb7f1f4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py
+++ /dev/null
@@ -1,282 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
-from resource_management import *
-import status_params
-import os
-
-# server configurations
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-# This is expected to be of the form #.#.#.#
-hdp_stack_version = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(hdp_stack_version)
-stack_is_hdp21 = hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.1') >= 0 and compare_versions(hdp_stack_version, '2.2') < 0
-
-# Hadoop params
-# TODO, this logic should initialize these parameters in a file inside the HDP 2.2 stack.
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >=0:
-  hadoop_bin_dir = "/usr/hdp/current/hadoop-client/bin"
-  hadoop_home = '/usr/hdp/current/hadoop-client'
-  hive_bin = '/usr/hdp/current/hive-client/bin'
-  hive_lib = '/usr/hdp/current/hive-client/lib'
-
-  hcat_lib = '/usr/hdp/current/hive-webhcat/share/hcatalog'
-  webhcat_bin_dir = '/usr/hdp/current/hive-webhcat/sbin'
-  
-  hive_specific_configs_supported = True
-else:
-  hadoop_bin_dir = "/usr/bin"
-  hadoop_home = '/usr'
-  hadoop_streaming_jars = '/usr/lib/hadoop-mapreduce/hadoop-streaming-*.jar'
-  hive_bin = '/usr/lib/hive/bin'
-  hive_lib = '/usr/lib/hive/lib/'
-  pig_tar_file = '/usr/share/HDP-webhcat/pig.tar.gz'
-  hive_tar_file = '/usr/share/HDP-webhcat/hive.tar.gz'
-  sqoop_tar_file = '/usr/share/HDP-webhcat/sqoop*.tar.gz'
-
-  if hdp_stack_version != "" and compare_versions(hdp_stack_version, "2.1.0.0") < 0:
-    hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
-    webhcat_bin_dir = '/usr/lib/hcatalog/sbin'
-  # for newer versions
-  else:
-    hcat_lib = '/usr/lib/hive-hcatalog/share/hcatalog'
-    webhcat_bin_dir = '/usr/lib/hive-hcatalog/sbin'
-    
-  hive_specific_configs_supported = False
-
-hadoop_conf_dir = "/etc/hadoop/conf"
-hive_conf_dir_prefix = "/etc/hive"
-hive_conf_dir = format("{hive_conf_dir_prefix}/conf")
-hive_client_conf_dir = format("{hive_conf_dir_prefix}/conf")
-hive_server_conf_dir = format("{hive_conf_dir_prefix}/conf.server")
-
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, "2.1.0.0") < 0:
-  hcat_conf_dir = '/etc/hcatalog/conf'
-  config_dir = '/etc/hcatalog/conf'
-# for newer versions
-else:
-  hcat_conf_dir = '/etc/hive-hcatalog/conf'
-  config_dir = '/etc/hive-webhcat/conf'
-
-execute_path = os.environ['PATH'] + os.pathsep + hive_bin + os.pathsep + hadoop_bin_dir
-hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
-hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
-
-hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
-hive_metastore_db_type = config['configurations']['hive-env']['hive_database_type']
-
-#users
-hive_user = config['configurations']['hive-env']['hive_user']
-#JDBC driver jar name
-hive_jdbc_driver = config['configurations']['hive-site']['javax.jdo.option.ConnectionDriverName']
-if hive_jdbc_driver == "com.mysql.jdbc.Driver":
-  jdbc_jar_name = "mysql-connector-java.jar"
-  jdbc_symlink_name = "mysql-jdbc-driver.jar"
-elif hive_jdbc_driver == "org.postgresql.Driver":
-  jdbc_jar_name = "postgresql-jdbc.jar"
-  jdbc_symlink_name = "postgres-jdbc-driver.jar"
-elif hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
-  jdbc_jar_name = "ojdbc6.jar"
-  jdbc_symlink_name = "oracle-jdbc-driver.jar"
-
-check_db_connection_jar_name = "DBConnectionVerification.jar"
-check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
-
-#common
-hive_metastore_host = config['clusterHostInfo']['hive_metastore_host'][0]
-hive_metastore_port = get_port_from_url(config['configurations']['hive-site']['hive.metastore.uris']) #"9083"
-hive_var_lib = '/var/lib/hive'
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-hive_server_host = config['clusterHostInfo']['hive_server_host'][0]
-hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
-hive_url = format("jdbc:hive2://{hive_server_host}:{hive_server_port}")
-hive_server_principal = config['configurations']['hive-site']['hive.server2.authentication.kerberos.principal']
-hive_server2_authentication = config['configurations']['hive-site']['hive.server2.authentication']
-
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-smoke_test_sql = format("{tmp_dir}/hiveserver2.sql")
-smoke_test_path = format("{tmp_dir}/hiveserver2Smoke.sh")
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-
-fs_root = config['configurations']['core-site']['fs.defaultFS']
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-hive_metastore_keytab_path =  config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
-
-#hive_env
-hive_dbroot = config['configurations']['hive-env']['hive_dbroot']
-hive_log_dir = config['configurations']['hive-env']['hive_log_dir']
-hive_pid_dir = status_params.hive_pid_dir
-hive_pid = status_params.hive_pid
-#Default conf dir for client
-hive_conf_dirs_list = [hive_server_conf_dir, hive_client_conf_dir]
-
-if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE"]:
-  hive_config_dir = hive_server_conf_dir
-else:
-  hive_config_dir = hive_client_conf_dir
-
-#hive-site
-hive_database_name = config['configurations']['hive-env']['hive_database_name']
-
-#Starting hiveserver2
-start_hiveserver2_script = 'startHiveserver2.sh.j2'
-
-##Starting metastore
-start_metastore_script = 'startMetastore.sh'
-hive_metastore_pid = status_params.hive_metastore_pid
-java_share_dir = '/usr/share/java'
-driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
-
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-user_group = config['configurations']['cluster-env']['user_group']
-artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
-
-target = format("{hive_lib}/{jdbc_jar_name}")
-
-jdk_location = config['hostLevelParams']['jdk_location']
-driver_curl_source = format("{jdk_location}/{jdbc_symlink_name}")
-
-start_hiveserver2_path = format("{tmp_dir}/start_hiveserver2_script")
-start_metastore_path = format("{tmp_dir}/start_metastore_script")
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-hive_heapsize = config['configurations']['hive-site']['hive.heapsize']
-java64_home = config['hostLevelParams']['java_home']
-
-##### MYSQL
-
-db_name = config['configurations']['hive-env']['hive_database_name']
-mysql_group = 'mysql'
-mysql_host = config['clusterHostInfo']['hive_mysql_host']
-
-mysql_adduser_path = format("{tmp_dir}/addMysqlUser.sh")
-mysql_deluser_path = format("{tmp_dir}/removeMysqlUser.sh")
-
-######## Metastore Schema
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, "2.1.0.0") < 0:
-  init_metastore_schema = False
-else:
-  init_metastore_schema = True
-
-########## HCAT
-
-hcat_dbroot = hcat_lib
-
-hcat_user = config['configurations']['hive-env']['hcat_user']
-webhcat_user = config['configurations']['hive-env']['webhcat_user']
-
-hcat_pid_dir = status_params.hcat_pid_dir
-hcat_log_dir = config['configurations']['hive-env']['hcat_log_dir']
-hcat_env_sh_template = config['configurations']['hcat-env']['content']
-
-#hive-log4j.properties.template
-if (('hive-log4j' in config['configurations']) and ('content' in config['configurations']['hive-log4j'])):
-  log4j_props = config['configurations']['hive-log4j']['content']
-else:
-  log4j_props = None
-
-#hive-exec-log4j.properties.template
-if (('hive-exec-log4j' in config['configurations']) and ('content' in config['configurations']['hive-exec-log4j'])):
-  log4j_exec_props = config['configurations']['hive-exec-log4j']['content']
-else:
-  log4j_exec_props = None
-
-daemon_name = status_params.daemon_name
-hive_env_sh_template = config['configurations']['hive-env']['content']
-
-hive_hdfs_user_dir = format("/user/{hive_user}")
-hive_hdfs_user_mode = 0700
-hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"]
-#for create_hdfs_directory
-hostname = config["hostname"]
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-
-# Tez-related properties
-tez_user = config['configurations']['tez-env']['tez_user']
-
-# Tez jars
-tez_local_api_jars = '/usr/lib/tez/tez*.jar'
-tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
-app_dir_files = {tez_local_api_jars:None}
-
-# Tez libraries
-tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
-
-if System.get_instance().os_family == "ubuntu":
-  mysql_configname = '/etc/mysql/my.cnf'
-else:
-  mysql_configname = '/etc/my.cnf'
-  
-mysql_user = 'mysql'
-
-# Hive security
-hive_authorization_enabled = config['configurations']['hive-site']['hive.security.authorization.enabled']
-
-mysql_jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
-
-# There are other packages that contain /usr/share/java/mysql-connector-java.jar (like libmysql-java),
-# trying to install mysql-connector-java upon them can cause packages to conflict.
-if os.path.exists(mysql_jdbc_driver_jar):
-  hive_exclude_packages = ['mysql-connector-java']
-else:  
-  hive_exclude_packages = []
-
-########################################################
-########### WebHCat related params #####################
-########################################################
-
-webhcat_env_sh_template = config['configurations']['webhcat-env']['content']
-templeton_log_dir = config['configurations']['hive-env']['hcat_log_dir']
-templeton_pid_dir = status_params.hcat_pid_dir
-
-webhcat_pid_file = status_params.webhcat_pid_file
-
-templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
-
-
-webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
-
-webhcat_apps_dir = "/apps/webhcat"
-
-hcat_hdfs_user_dir = format("/user/{hcat_user}")
-hcat_hdfs_user_mode = 0755
-webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
-webhcat_hdfs_user_mode = 0755
-#for create_hdfs_directory
-security_param = "true" if security_enabled else "false"
-
-import functools
-#create partial functions with common arguments for every HdfsDirectory call
-#to create hdfs directory we need to call params.HdfsDirectory in code
-HdfsDirectory = functools.partial(
-  HdfsDirectory,
-  conf_dir = hadoop_conf_dir,
-  hdfs_user = hdfs_principal_name if security_enabled else hdfs_user,
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  bin_dir = hadoop_bin_dir
-)

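Most of the branching in params.py above hangs off compare_versions from resource_management, which orders dotted version strings numerically rather than lexically. A stand-alone approximation of the "2.2 or later" gate; the sample version value is illustrative only:

def version_tuple(v):
    return tuple(int(part) for part in v.split('.'))

hdp_stack_version = "2.2.0.0"  # hypothetical value for illustration

if hdp_stack_version and version_tuple(hdp_stack_version) >= version_tuple("2.2"):
    hive_lib = '/usr/hdp/current/hive-client/lib'   # versioned HDP 2.2+ layout
else:
    hive_lib = '/usr/lib/hive/lib/'                 # legacy flat layout
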
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/service_check.py
deleted file mode 100644
index 945a676..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/service_check.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import socket
-import sys
-
-from hcat_service_check import hcat_service_check
-from webhcat_service_check import webhcat_service_check
-
-class HiveServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    address=format("{hive_server_host}")
-    port=int(format("{hive_server_port}"))
-    print "Test connectivity to hive server"
-    if params.security_enabled:
-      kinitcmd=format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser}; ")
-    else:
-      kinitcmd=None
-
-    try:
-      check_thrift_port_sasl(address, port, params.hive_server2_authentication,
-                             params.hive_server_principal, kinitcmd, params.smokeuser)
-      print "Successfully connected to %s on port %s" % (address, port)
-    except:
-      print "Connection to %s on port %s failed" % (address, port)
-      exit(1)
-
-    hcat_service_check()
-    webhcat_service_check()
-
-if __name__ == "__main__":
-  HiveServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/status_params.py
deleted file mode 100644
index e6f2514..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/status_params.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-hive_pid_dir = config['configurations']['hive-env']['hive_pid_dir']
-hive_pid = 'hive-server.pid'
-
-hive_metastore_pid = 'hive.pid'
-
-hcat_pid_dir = config['configurations']['hive-env']['hcat_pid_dir'] #hcat_pid_dir
-webhcat_pid_file = format('{hcat_pid_dir}/webhcat.pid')
-
-if System.get_instance().os_family == "suse" or System.get_instance().os_family == "ubuntu":
-  daemon_name = 'mysql'
-else:
-  daemon_name = 'mysqld'
\ No newline at end of file

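The os_family switch at the end of status_params.py generalizes to a small lookup: SUSE and Ubuntu package the MySQL daemon as "mysql", while RHEL-family systems use "mysqld". As a table-driven sketch:

MYSQL_DAEMON_BY_OS_FAMILY = {"suse": "mysql", "ubuntu": "mysql"}

def mysql_daemon_name(os_family):
    # Anything not listed falls back to the RHEL-style name.
    return MYSQL_DAEMON_BY_OS_FAMILY.get(os_family, "mysqld")
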
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/webhcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/webhcat.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/webhcat.py
deleted file mode 100644
index c02bf74..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/webhcat.py
+++ /dev/null
@@ -1,143 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import sys
-import os.path
-import glob
-
-from resource_management import *
-from resource_management.core.resources.system import Execute
-from resource_management.libraries.functions.version import compare_versions
-from resource_management.libraries.functions.dynamic_variable_interpretation import copy_tarballs_to_hdfs
-
-
-def webhcat():
-  import params
-
-  if params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, "2.2.0.0") < 0:
-    params.HdfsDirectory(params.webhcat_apps_dir,
-                         action="create_delayed",
-                         owner=params.webhcat_user,
-                         mode=0755
-    )
-  
-  if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
-    params.HdfsDirectory(params.hcat_hdfs_user_dir,
-                         action="create_delayed",
-                         owner=params.hcat_user,
-                         mode=params.hcat_hdfs_user_mode
-    )
-  params.HdfsDirectory(params.webhcat_hdfs_user_dir,
-                       action="create_delayed",
-                       owner=params.webhcat_user,
-                       mode=params.webhcat_hdfs_user_mode
-  )
-  params.HdfsDirectory(None, action="create")
-
-  Directory(params.templeton_pid_dir,
-            owner=params.webhcat_user,
-            mode=0755,
-            group=params.user_group,
-            recursive=True)
-
-  Directory(params.templeton_log_dir,
-            owner=params.webhcat_user,
-            mode=0755,
-            group=params.user_group,
-            recursive=True)
-
-  Directory(params.config_dir,
-            recursive=True,
-            owner=params.webhcat_user,
-            group=params.user_group)
-
-  if params.security_enabled:
-    kinit_if_needed = format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name};")
-  else:
-    kinit_if_needed = ""
-
-  if kinit_if_needed:
-    Execute(kinit_if_needed,
-            user=params.webhcat_user,
-            path='/bin'
-    )
-
-  # TODO, these checks that are specific to HDP 2.2 and greater should really be in a script specific to that stack.
-  if params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, "2.2.0.0") >= 0:
-    copy_tarballs_to_hdfs('hive', params.webhcat_user, params.hdfs_user, params.user_group)
-    copy_tarballs_to_hdfs('pig', params.webhcat_user, params.hdfs_user, params.user_group)
-    copy_tarballs_to_hdfs('hadoop-streaming', params.webhcat_user, params.hdfs_user, params.user_group)
-    copy_tarballs_to_hdfs('sqoop', params.webhcat_user, params.hdfs_user, params.user_group)
-  else:
-    CopyFromLocal(params.hadoop_streaming_jars,
-                  owner=params.webhcat_user,
-                  mode=0755,
-                  dest_dir=params.webhcat_apps_dir,
-                  kinnit_if_needed=kinit_if_needed,
-                  hdfs_user=params.hdfs_user,
-                  hadoop_bin_dir=params.hadoop_bin_dir,
-                  hadoop_conf_dir=params.hadoop_conf_dir
-    )
-
-    if (os.path.isfile(params.pig_tar_file)):
-      CopyFromLocal(params.pig_tar_file,
-                    owner=params.webhcat_user,
-                    mode=0755,
-                    dest_dir=params.webhcat_apps_dir,
-                    kinnit_if_needed=kinit_if_needed,
-                    hdfs_user=params.hdfs_user,
-                    hadoop_bin_dir=params.hadoop_bin_dir,
-                    hadoop_conf_dir=params.hadoop_conf_dir
-      )
-
-    CopyFromLocal(params.hive_tar_file,
-                  owner=params.webhcat_user,
-                  mode=0755,
-                  dest_dir=params.webhcat_apps_dir,
-                  kinnit_if_needed=kinit_if_needed,
-                  hdfs_user=params.hdfs_user,
-                  hadoop_bin_dir=params.hadoop_bin_dir,
-                  hadoop_conf_dir=params.hadoop_conf_dir
-    )
-
-    if (len(glob.glob(params.sqoop_tar_file)) > 0):
-      CopyFromLocal(params.sqoop_tar_file,
-                    owner=params.webhcat_user,
-                    mode=0755,
-                    dest_dir=params.webhcat_apps_dir,
-                    kinnit_if_needed=kinit_if_needed,
-                    hdfs_user=params.hdfs_user,
-                    hadoop_bin_dir=params.hadoop_bin_dir,
-                    hadoop_conf_dir=params.hadoop_conf_dir
-      )
-
-  XmlConfig("webhcat-site.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['webhcat-site'],
-            configuration_attributes=params.config['configuration_attributes']['webhcat-site'],
-            owner=params.webhcat_user,
-            group=params.user_group,
-            )
-
-  File(format("{config_dir}/webhcat-env.sh"),
-       owner=params.webhcat_user,
-       group=params.user_group,
-       content=InlineTemplate(params.webhcat_env_sh_template)
-  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/webhcat_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/webhcat_server.py
deleted file mode 100644
index a8b3a8f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/webhcat_server.py
+++ /dev/null
@@ -1,53 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import sys
-from resource_management import *
-
-from webhcat import webhcat
-from webhcat_service import webhcat_service
-
-class WebHCatServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    webhcat()
-
-  def start(self, env, rolling_restart=False):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    webhcat_service(action = 'start')
-
-  def stop(self, env, rolling_restart=False):
-    import params
-    env.set_params(params)
-
-    webhcat_service(action = 'stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.webhcat_pid_file)
-
-if __name__ == "__main__":
-  WebHCatServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/webhcat_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/webhcat_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/webhcat_service.py
deleted file mode 100644
index 25a60e8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/webhcat_service.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-
-def webhcat_service(action='start'):
-  import params
-
-  cmd = format('env HADOOP_HOME={hadoop_home} {webhcat_bin_dir}/webhcat_server.sh')
-
-  if action == 'start':
-    daemon_cmd = format('{cmd} start')
-    no_op_test = format('ls {webhcat_pid_file} >/dev/null 2>&1 && ps -p `cat {webhcat_pid_file}` >/dev/null 2>&1')
-    Execute(daemon_cmd,
-            user=params.webhcat_user,
-            not_if=no_op_test
-    )
-  elif action == 'stop':
-    daemon_cmd = format('{cmd} stop')
-    Execute(daemon_cmd,
-            user=params.webhcat_user
-    )
-    File(params.webhcat_pid_file,
-         action="delete",
-    )

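The stop branch above pairs the kill with a pid-file delete so a stale pid cannot satisfy the no_op_test on the next start. The same two steps in stdlib form (stop_by_pid_file is a hypothetical name; the patch itself shells out via Execute):

import os
import signal

def stop_by_pid_file(pid_file):
    with open(pid_file) as f:
        pid = int(f.read().strip())
    os.kill(pid, signal.SIGTERM)   # the "kill `cat pid_file`" step
    os.remove(pid_file)            # the File(..., action="delete") step
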
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/webhcat_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/webhcat_service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/webhcat_service_check.py
deleted file mode 100644
index 8d15e47..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/webhcat_service_check.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-def webhcat_service_check():
-  import params
-  File(format("{tmp_dir}/templetonSmoke.sh"),
-       content= StaticFile('templetonSmoke.sh'),
-       mode=0755
-  )
-
-  cmd = format("{tmp_dir}/templetonSmoke.sh {webhcat_server_host[0]} {smokeuser} {smokeuser_keytab}"
-               " {security_param} {kinit_path_local}",
-               smokeuser_keytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
-
-  Execute(cmd,
-          tries=3,
-          try_sleep=5,
-          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-          logoutput=True)
-
-
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/templates/startHiveserver2.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/templates/startHiveserver2.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/templates/startHiveserver2.sh.j2
deleted file mode 100644
index 70b418c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/templates/startHiveserver2.sh.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-HIVE_SERVER2_OPTS=" -hiveconf hive.log.file=hiveserver2.log -hiveconf hive.log.dir=$5"
-HIVE_CONF_DIR=$4 {{hive_bin}}/hiveserver2 -hiveconf hive.metastore.uris=" " ${HIVE_SERVER2_OPTS} > $1 2> $2 &
-echo $!|cat>$3

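For readers tracing the start path: hive_service above launches this template with five positional arguments, so the shell variables map as follows (the paths shown are the ones interpolated by the hiveserver2 start command in hive_service.py):

  $1  stdout capture file   ({hive_log_dir}/hive-server2.out)
  $2  stderr capture file   ({hive_log_dir}/hive-server2.log)
  $3  pid file, written via echo $!
  $4  HIVE_CONF_DIR         ({hive_server_conf_dir})
  $5  hive.log.dir          ({hive_log_dir})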

[07/37] ambari git commit: AMBARI-8695: Common Services: Refactor HDP-2.0.6 HDFS, ZOOKEEPER services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hadoop-env.xml
deleted file mode 100644
index 1d6618d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hadoop-env.xml
+++ /dev/null
@@ -1,210 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hdfs_log_dir_prefix</name>
-    <value>/var/log/hadoop</value>
-    <description>Hadoop Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>hadoop_pid_dir_prefix</name>
-    <value>/var/run/hadoop</value>
-    <description>Hadoop PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>hadoop_root_logger</name>
-    <value>INFO,RFA</value>
-    <description>Hadoop Root Logger</description>
-  </property>
-  <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_heapsize</name>
-    <value>1024</value>
-    <description>NameNode Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_opt_newsize</name>
-    <value>200</value>
-    <description>NameNode new generation size</description>
-  </property>
-  <property>
-    <name>namenode_opt_maxnewsize</name>
-    <value>200</value>
-    <description>NameNode maximum new generation size</description>
-  </property>
-  <property>
-    <name>namenode_opt_permsize</name>
-    <value>128</value>
-    <description>NameNode permanent generation size</description>
-  </property>
-  <property>
-    <name>namenode_opt_maxpermsize</name>
-    <value>256</value>
-    <description>NameNode maximum permanent generation size</description>
-  </property>
-  <property>
-    <name>dtnode_heapsize</name>
-    <value>1024</value>
-    <description>DataNode maximum Java heap size</description>
-  </property>
-  <property>
-    <name>proxyuser_group</name>
-    <value>users</value>
-    <property-type>GROUP</property-type>
-    <description>Proxy user group.</description>
-  </property>
-  <property>
-    <name>hdfs_user</name>
-    <value>hdfs</value>
-    <property-type>USER</property-type>
-    <description>User to run HDFS as</description>
-  </property>
-  <property>
-    <name>dfs.datanode.data.dir.mount.file</name>
-    <value>/etc/hadoop/conf/dfs_data_dir_mount.hist</value>
-    <description>File path that contains the last known mount point for each data dir. This file is used to avoid creating a DFS data dir on the root drive (and filling it up) if a path was previously mounted on a drive.</description>
-  </property>
-
-  <!-- hadoop-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the jinja template for hadoop-env.sh file</description>
-    <value>
-# Set Hadoop-specific environment variables here.
-
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
-
-# The java implementation to use.  Required.
-export JAVA_HOME={{java_home}}
-export HADOOP_HOME_WARN_SUPPRESS=1
-
-# Hadoop home directory
-export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
-
-# Hadoop Configuration Directory
-# TODO: if this env var is already set elsewhere, that can cause problems

-export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
-
-{# this is different for HDP1 #}
-# Path to jsvc required by secure HDP 2.0 datanode
-export JSVC_HOME={{jsvc_path}}
-
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
-
-export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
-
-# Extra Java runtime options.  Empty by default.
-export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
-
-# Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
-HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
-
-HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
-HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
-HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
-
-export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
-
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
-# On secure datanodes, user to run the datanode as after dropping privileges
-export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}
-
-# Extra ssh options.  Empty by default.
-export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
-
-# Where log files are stored.  $HADOOP_HOME/logs by default.
-export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
-
-# History server logs
-export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
-
-# Where log files are stored in the secure data environment.
-export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
-
-# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
-# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
-
-# host:path where hadoop code should be rsync'd from.  Unset by default.
-# export HADOOP_MASTER=master:/home/$USER/src/hadoop
-
-# Seconds to sleep between slave commands.  Unset by default.  This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HADOOP_SLAVE_SLEEP=0.1
-
-# The directory where pid files are stored. /tmp by default.
-export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
-export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
-
-# History server pid
-export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
-
-YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
-
-# A string representing this instance of hadoop. $USER by default.
-export HADOOP_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes.  See 'man nice'.
-
-# export HADOOP_NICENESS=10
-
-# Use libraries from standard classpath
-JAVA_JDBC_LIBS=""
-#Add libraries required by mysql connector
-for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
-do
-  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
-done
-#Add libraries required by oracle connector
-for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
-do
-  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
-done
-#Add libraries required by nodemanager
-MAPREDUCE_LIBS={{mapreduce_libs_path}}
-export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
-
-if [ -d "/usr/lib/tez" ]; then
-  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
-fi
-
-# Setting path to hdfs command line
-export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
-
-#Mostly required for hadoop 2.0
-export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
-    </value>
-  </property>
-  
-</configuration>
\ No newline at end of file

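The scalar properties above (heap and generation sizes, in MB) feed the -Xms/-Xmx/-XX flags inside the hadoop-env.sh template held in the content property. A minimal sketch of that wiring, with a simplified rendering that assumes the parameter layer appends the 'm' unit the template itself omits:

# Defaults copied from the properties above; units are MB.
props = {
    'namenode_heapsize': 1024,
    'namenode_opt_newsize': 200,
    'namenode_opt_maxnewsize': 200,
    'namenode_opt_permsize': 128,
    'namenode_opt_maxpermsize': 256,
}

# Simplified rendering of the heap-related parts of HADOOP_NAMENODE_OPTS.
flags = ('-XX:NewSize={namenode_opt_newsize}m'
         ' -XX:MaxNewSize={namenode_opt_maxnewsize}m'
         ' -XX:PermSize={namenode_opt_permsize}m'
         ' -XX:MaxPermSize={namenode_opt_maxpermsize}m'
         ' -Xms{namenode_heapsize}m -Xmx{namenode_heapsize}m').format(**props)
print(flags)
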
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hadoop-policy.xml
deleted file mode 100644
index 41bde16..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hadoop-policy.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration supports_final="true">
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.tracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
-    communicate with the jobtracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for JobSubmissionProtocol, used by job clients to
-    communicate with the jobtracker for job submission, querying job status etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.task.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
- <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    users mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank. For
-    e.g. "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-  </property>
-
-<property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-
-</configuration>

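Every description in the file just removed repeats the same ACL grammar: a comma-separated user list and a comma-separated group list, separated by a single blank, with "*" meaning all users are allowed. A minimal sketch of that grammar as documented (a simplified stand-in, not Hadoop's actual AccessControlList class):

def parse_acl(acl):
    # The special value "*" allows all users.
    if acl.strip() == '*':
        return None, None
    # A leading blank means the value names only groups.
    users_part, _, groups_part = acl.partition(' ')
    users = [u for u in users_part.split(',') if u]
    groups = [g for g in groups_part.split(',') if g]
    return users, groups

print(parse_acl('alice,bob users,wheel'))  # (['alice', 'bob'], ['users', 'wheel'])
print(parse_acl('hadoop'))                 # (['hadoop'], [])
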
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-log4j.xml
deleted file mode 100644
index 08822eb..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-log4j.xml
+++ /dev/null
@@ -1,201 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>content</name>
-    <description>Custom log4j.properties</description>
-    <value>
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-
-# Define some default values that can be overridden by system properties
-# To change daemon root logger use hadoop_root_logger in hadoop-env
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-#Security audit appender
-#
-hadoop.security.logger=INFO,console
-hadoop.security.log.maxfilesize=256MB
-hadoop.security.log.maxbackupindex=20
-log4j.category.SecurityLogger=${hadoop.security.logger}
-hadoop.security.log.file=SecurityAuth.audit
-log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
-
-log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
-log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
-log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
-
-#
-# hdfs audit logging
-#
-hdfs.audit.logger=INFO,console
-log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
-log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
-log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
-log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# mapred audit logging
-#
-mapred.audit.logger=INFO,console
-log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
-log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
-log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
-log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# Rolling File Appender
-#
-
-log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and 30-day backups
-log4j.appender.RFA.MaxFileSize=256MB
-log4j.appender.RFA.MaxBackupIndex=10
-
-log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-
-# Removes "deprecated" messages
-log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
-    </value>
-  </property>
-
-</configuration>

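The DRFA, DRFAS, DRFAAUDIT, and MRAUDIT appenders above all share one shape: roll the file at midnight with a .yyyy-MM-dd suffix and write date/level/logger/message lines. For comparison only, a rough Python-logging analogue of that behavior (not part of Ambari or Hadoop):

import logging
from logging.handlers import TimedRotatingFileHandler

handler = TimedRotatingFileHandler('hadoop.log', when='midnight')
handler.suffix = '%Y-%m-%d'  # mirrors DatePattern=.yyyy-MM-dd
# Mirrors the "date level logger: message" ConversionPattern used above.
handler.setFormatter(
    logging.Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s'))

logger = logging.getLogger('org.apache.hadoop.demo')
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.info('rolled copies get a dated suffix at midnight')
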
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml
deleted file mode 100644
index e67ed9f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,430 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration supports_final="true">
-
-  <!-- file system properties -->
-
-  <property>
-    <name>dfs.namenode.name.dir</name>
-    <!-- cluster variant -->
-    <value>/hadoop/hdfs/namenode</value>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>Whether to enable DFS append</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>true</value>
-    <description>Whether to enable WebHDFS feature</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description> Number of failed disks a DataNode would tolerate before it stops offering service</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir</name>
-    <value>/hadoop/hdfs/data</value>
-    <description>Determines where on the local filesystem a DFS data node
-      should store its blocks.  If this is a comma-delimited
-      list of directories, then data will be stored in all named
-      directories, typically on different devices.
-      Directories that do not exist are ignored.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value>/etc/hadoop/conf/dfs.exclude</value>
-    <description>Names a file that contains a list of hosts that are
-      not permitted to connect to the namenode.  The full pathname of the
-      file must be specified.  If the value is empty, no hosts are
-      excluded.</description>
-  </property>
-
-  <!--
-    <property>
-      <name>dfs.hosts</name>
-      <value>/etc/hadoop/conf/dfs.include</value>
-      <description>Names a file that contains a list of hosts that are
-      permitted to connect to the namenode. The full pathname of the file
-      must be specified.  If the value is empty, all hosts are
-      permitted.</description>
-    </property>
-  -->
-
-  <property>
-    <name>dfs.namenode.checkpoint.dir</name>
-    <value>/hadoop/hdfs/namesecondary</value>
-    <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary images to merge.
-      If this is a comma-delimited list of directories then the image is
-      replicated in all of the directories for redundancy.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.checkpoint.edits.dir</name>
-    <value>${dfs.namenode.checkpoint.dir}</value>
-    <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary edits to merge.
-      If this is a comma-delimited list of directories then the edits are
-      replicated in all of the directories for redundancy.
-      Default value is same as dfs.namenode.checkpoint.dir
-    </description>
-  </property>
-
-
-  <property>
-    <name>dfs.namenode.checkpoint.period</name>
-    <value>21600</value>
-    <description>The number of seconds between two periodic checkpoints.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.checkpoint.txns</name>
-    <value>1000000</value>
-    <description>The Secondary NameNode or CheckpointNode will create a checkpoint
-      of the namespace every 'dfs.namenode.checkpoint.txns' transactions,
-      regardless of whether 'dfs.namenode.checkpoint.period' has expired.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value>3</value>
-    <description>Default block replication.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.safemode.threshold-pct</name>
-    <value>1.0f</value>
-    <description>
-      Specifies the percentage of blocks that should satisfy
-      the minimal replication requirement defined by dfs.namenode.replication.min.
-      Values less than or equal to 0 mean not to start in safe mode.
-      Values greater than 1 will make safe mode permanent.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-      Specifies the maximum amount of bandwidth that each datanode
-      can utilize for the balancing purpose in term of
-      the number of bytes per second.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
-    <description>
-      This property is used by HftpFileSystem.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.address</name>
-    <value>0.0.0.0:50010</value>
-    <description>
-      The datanode server address and port for data transfer.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:50075</value>
-    <description>
-      The datanode http server address and port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.https.address</name>
-    <value>0.0.0.0:50475</value>
-    <description>
-      The datanode https server address and port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.blocksize</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.http-address</name>
-    <value>localhost:50070</value>
-    <description>The address and the base port on which the dfs NameNode
-      web UI will listen.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.reserved</name>
-    <!-- cluster variant -->
-    <value>1073741824</value>
-    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.ipc.address</name>
-    <value>0.0.0.0:8010</value>
-    <description>
-      The datanode ipc server address and port.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.blockreport.initialDelay</name>
-    <value>120</value>
-    <description>Delay for first block report in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>40</value>
-    <description>The number of server threads for the namenode.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.max.transfer.threads</name>
-    <value>1024</value>
-    <description>Specifies the maximum number of threads to use for transferring data in and out of the datanode.</description>
-  </property>
-
-  <!-- Permissions configuration -->
-
-  <property>
-    <name>fs.permissions.umask-mode</name>
-    <value>022</value>
-    <description>
-      The octal umask used when creating files and directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions.enabled</name>
-    <value>true</value>
-    <description>
-      If "true", enable permission checking in HDFS.
-      If "false", permission checking is turned off,
-      but all other behavior is unchanged.
-      Switching from one parameter value to the other does not change the mode,
-      owner or group of files or directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions.superusergroup</name>
-    <value>hdfs</value>
-    <description>The name of the group of super-users.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>100</value>
-    <description>Added to grow the queue size so that more client connections are allowed</description>
-  </property>
-
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
-    <description>
-      If "true", access tokens are used as capabilities for accessing datanodes.
-      If "false", no access tokens are checked on accessing datanodes.
-    </description>
-  </property>
-
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.namenode.secondary.http-address</name>
-    <value>localhost:50090</value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-
-
-  <property>
-    <name>dfs.namenode.https-address</name>
-    <value>localhost:50470</value>
-    <description>The https address where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value>750</value>
-    <description>The permissions that should be there on dfs.datanode.data.dir
-      directories. The datanode will not come up if the permissions are
-      different on existing dfs.datanode.data.dir directories. If the directories
-      don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.accesstime.precision</name>
-    <value>0</value>
-    <description>The access time for an HDFS file is precise up to this value.
-      The default value is 1 hour. Setting a value of 0 disables
-      access times for HDFS.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.cluster.administrators</name>
-    <value> hdfs</value>
-    <description>ACL for who all can view the default servlets in the HDFS</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.avoid.read.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid reading from stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.avoid.write.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid writing to stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.write.stale.datanode.ratio</name>
-    <value>1.0f</value>
-    <description>When the ratio of stale datanodes to total datanodes is greater
-      than this ratio, stop avoiding writes to stale nodes so as to prevent hotspots.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.stale.datanode.interval</name>
-    <value>30000</value>
-    <description>Datanode is stale after not getting a heartbeat in this interval in ms</description>
-  </property>
-
-  <property>
-    <name>dfs.journalnode.http-address</name>
-    <value>0.0.0.0:8480</value>
-    <description>The address and port the JournalNode web UI listens on.
-      If the port is 0 then the server will start on a free port. </description>
-  </property>
-
-  <property>
-    <name>dfs.journalnode.edits.dir</name>
-    <value>/grid/0/hdfs/journal</value>
-    <description>The path where the JournalNode daemon will store its local state. </description>
-  </property>
-
-  <!-- HDFS Short-Circuit Local Reads -->
-
-  <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value>true</value>
-    <description>
-      This configuration parameter turns on short-circuit local reads.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.domain.socket.path</name>
-    <value>/var/lib/hadoop-hdfs/dn_socket</value>
-    <description>
-      This is a path to a UNIX domain socket that will be used for communication between the DataNode and local HDFS clients.
-      If the string "_PORT" is present in this path, it will be replaced by the TCP port of the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit.streams.cache.size</name>
-    <value>4096</value>
-    <description>
-      The DFSClient maintains a cache of recently opened file descriptors. This
-      parameter controls the size of that cache. Setting this higher will use
-      more file descriptors, but potentially provide better performance on
-      workloads involving lots of seeks.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.name.dir.restore</name>
-    <value>true</value>
-    <description>Set to true to enable NameNode to attempt recovering a previously failed dfs.namenode.name.dir.
-      When enabled, a recovery of any failed directory is attempted during checkpoint.</description>
-  </property>
-
-  <property>
-    <name>dfs.http.policy</name>
-    <value>HTTP_ONLY</value>
-    <description>
-      Decide if HTTPS (SSL) is supported on HDFS. This configures the HTTP endpoint for HDFS daemons.
-      The following values are supported: HTTP_ONLY (service is provided only on http), HTTPS_ONLY
-      (service is provided only on https), and HTTP_AND_HTTPS (service is provided on both http and https).
-    </description>
-  </property>
-
-</configuration>

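Two of the properties above act as an either/or trigger: dfs.namenode.checkpoint.period (21600 seconds) and dfs.namenode.checkpoint.txns (1000000 transactions), whichever limit is crossed first. A minimal sketch of that documented semantics (an illustration, not the HDFS implementation):

CHECKPOINT_PERIOD_SECS = 21600   # dfs.namenode.checkpoint.period
CHECKPOINT_TXNS = 1000000        # dfs.namenode.checkpoint.txns

def should_checkpoint(secs_since_last, txns_since_last):
    # A checkpoint fires when either limit is reached, regardless of the other.
    return (secs_since_last >= CHECKPOINT_PERIOD_SECS
            or txns_since_last >= CHECKPOINT_TXNS)

print(should_checkpoint(3600, 1200000))  # True: transaction limit crossed
print(should_checkpoint(3600, 500))      # False: neither limit reached
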
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metainfo.xml
index 93504a4..a5d6862 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metainfo.xml
@@ -20,206 +20,7 @@
   <services>
     <service>
       <name>HDFS</name>
-      <displayName>HDFS</displayName>
-      <comment>Apache Hadoop Distributed File System</comment>
-      <version>2.1.0.2.0</version>
-
-      <components>
-        <component>
-          <name>NAMENODE</name>
-          <displayName>NameNode</displayName>
-          <category>MASTER</category>
-          <cardinality>1-2</cardinality>
-          <commandScript>
-            <script>scripts/namenode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/namenode.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-            <customCommand>
-              <name>REBALANCEHDFS</name>
-              <background>true</background>
-              <commandScript>
-                <script>scripts/namenode.py</script>
-                <scriptType>PYTHON</scriptType>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-        </component>
-
-        <component>
-          <name>DATANODE</name>
-          <displayName>DataNode</displayName>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/datanode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>SECONDARY_NAMENODE</name>
-          <displayName>SNameNode</displayName>
-          <!-- TODO:  cardinality is conditional on HA usage -->
-          <cardinality>1</cardinality>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/snamenode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HDFS_CLIENT</name>
-          <displayName>HDFS Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/hdfs_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>hdfs-site.xml</fileName>
-              <dictionaryName>hdfs-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>core-site.xml</fileName>
-              <dictionaryName>core-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>log4j.properties</fileName>
-              <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName>
-            </configFile>                          
-            <configFile>
-              <type>env</type>
-              <fileName>hadoop-env.sh</fileName>
-              <dictionaryName>hadoop-env</dictionaryName>
-            </configFile>
-          </configFiles>
-        </component>
-
-        <component>
-          <name>JOURNALNODE</name>
-          <displayName>JournalNode</displayName>
-          <category>SLAVE</category>
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/journalnode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>ZKFC</name>
-          <displayName>ZKFailoverController</displayName>
-          <category>SLAVE</category>
-          <!-- TODO: cardinality is conditional on HA topology -->
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/zkfc_slave.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>hadoop</name>
-            </package>
-            <package>
-              <name>hadoop-lzo</name>
-            </package>
-          </packages>
-        </osSpecific>
-        
-        <osSpecific>
-          <osFamily>redhat5,redhat6,suse11</osFamily>
-          <packages>
-            <package>
-              <name>snappy</name>
-            </package>
-            <package>
-              <name>snappy-devel</name>
-            </package>
-            <package>
-              <name>lzo</name>
-            </package>
-            <package>
-              <name>hadoop-lzo-native</name>
-            </package>
-            <package>
-              <name>hadoop-libhdfs</name>
-            </package>
-            <package>
-              <name>ambari-log4j</name>
-            </package>
-          </packages>
-        </osSpecific>
-        
-        <osSpecific>
-          <osFamily>ubuntu12</osFamily>
-          <packages>
-            <package>
-              <name>libsnappy1</name>
-            </package>
-            <package>
-              <name>libsnappy-dev</name>
-            </package>
-            <package>
-              <name>liblzo2-2</name>
-            </package>
-            <package>
-              <name>hadoop-hdfs</name>
-            </package>
-            <package>
-              <name>libhdfs0</name>
-            </package>
-            <package>
-              <name>libhdfs0-dev</name>
-            </package>
-          </packages>
-        </osSpecific>
-            
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>ZOOKEEPER</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>core-site</config-type>
-        <config-type>hdfs-site</config-type>
-        <config-type>hadoop-env</config-type>
-        <config-type>hadoop-policy</config-type>
-        <config-type>hdfs-log4j</config-type>
-      </configuration-dependencies>
+      <extends>common-services/HDFS/2.1.0.2.0</extends>
     </service>
   </services>
 </metainfo>

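The net effect of this hunk is the whole point of the refactoring: the roughly 200 lines of component, package, and configuration metadata move to common-services, and the stack's metainfo.xml keeps only the service name plus an extends pointer. A minimal sketch, assuming a hypothetical local copy of the trimmed file, of reading that pointer with the standard library:

import xml.etree.ElementTree as ET

tree = ET.parse('metainfo.xml')  # hypothetical local copy of the file above
for service in tree.getroot().iter('service'):
    name = service.findtext('name')
    parent = service.findtext('extends')
    print(name, 'inherits from', parent)
    # -> HDFS inherits from common-services/HDFS/2.1.0.2.0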

[21/37] ambari git commit: AMBARI-8745: Common Services: Refactor HDP2.0.6 FLUME, GANGLIA, HBASE services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metrics.json b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metrics.json
new file mode 100644
index 0000000..50f9c42
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metrics.json
@@ -0,0 +1,13635 @@
+{
+  "HBASE_REGIONSERVER": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "metrics/hbase/regionserver/compactionTime_avg_time": {
+            "metric": "hbase.regionserver.compactionTime_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/closeRegion_num_ops": {
+            "metric": "rpc.rpc.closeRegion_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/mutationsWithoutWALSize": {
+            "metric": "regionserver.Server.mutationsWithoutWALSize",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/unassign_num_ops": {
+            "metric": "rpc.rpc.unassign_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/modifyTable_num_ops": {
+            "metric": "rpc.rpc.modifyTable_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getProtocolVersion_avg_time": {
+            "metric": "rpc.rpc.getProtocolVersion_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getZooKeeper/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.getZooKeeper.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load/load_one": {
+            "metric": "load_one",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getClosestRowBefore_num_ops": {
+            "metric": "rpc.rpc.getClosestRowBefore_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/slowAppendCount": {
+            "metric": "regionserver.Server.slowAppendCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/getClosestRowBefore/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.getClosestRowBefore.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/lockRow_num_ops": {
+            "metric": "rpc.rpc.lockRow_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/flushRegion_avg_time": {
+            "metric": "rpc.rpc.flushRegion_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_total": {
+            "metric": "swap_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/stopMaster_num_ops": {
+            "metric": "rpc.rpc.stopMaster_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/openRegions/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.openRegions.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/balance_avg_time": {
+            "metric": "rpc.rpc.balance_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/process/proc_total": {
+            "metric": "proc_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/part_max_used": {
+            "metric": "part_max_used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/modifyColumn_avg_time": {
+            "metric": "rpc.rpc.modifyColumn_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/multi/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.multi.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/rootIndexSizeKB": {
+            "metric": "hbase.regionserver.rootIndexSizeKB",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getZooKeeper_num_ops": {
+            "metric": "rpc.rpc.getZooKeeper_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/blockCacheCount": {
+            "metric": "regionserver.Server.blockCacheCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/flushRegion_num_ops": {
+            "metric": "rpc.rpc.flushRegion_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_num_ops": {
+            "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rollHLogWriter/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.rollHLogWriter.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.removeFromOnlineRegions.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/putRequestLatency_std_dev": {
+            "metric": "hbase.regionserver.putRequestLatency_std_dev",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/getRequestLatency_std_dev": {
+            "metric": "hbase.regionserver.getRequestLatency_std_dev",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/get_num_ops": {
+            "metric": "rpc.rpc.get_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/stopMaster_avg_time": {
+            "metric": "rpc.rpc.stopMaster_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getOnlineRegions/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.getOnlineRegions.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_in": {
+            "metric": "bytes_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/removeFromOnlineRegions_num_ops": {
+            "metric": "rpc.rpc.removeFromOnlineRegions_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/ping_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.ping_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/openScanner/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.openScanner.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getRegionInfo_avg_time": {
+            "metric": "rpc.rpc.getRegionInfo_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/lockRow_avg_time": {
+            "metric": "rpc.rpc.lockRow_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/commitPending_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.commitPending_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/checkOOME_num_ops": {
+            "metric": "rpc.rpc.checkOOME_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/reportRSFatalError_num_ops": {
+            "metric": "rpc.rpc.reportRSFatalError_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getConfiguration/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.getConfiguration.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/reportRSFatalError_avg_time": {
+            "metric": "rpc.rpc.reportRSFatalError_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_in": {
+            "metric": "pkts_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_total": {
+            "metric": "mem_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "jvm.JvmMetrics.ThreadsRunnable",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/unlockRow/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.unlockRow.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_min": {
+            "metric": "regionserver.Server.Delete_min",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "jvm.JvmMetrics.ThreadsNew",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getClusterStatus_num_ops": {
+            "metric": "rpc.rpc.getClusterStatus_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getHTableDescriptors_avg_time": {
+            "metric": "rpc.rpc.getHTableDescriptors_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationFailures": {
+            "metric": "rpc.rpc.rpcAuthorizationFailures",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/deleteColumn_num_ops": {
+            "metric": "rpc.rpc.deleteColumn_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/delete/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.delete.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/increment_num_ops": {
+            "metric": "rpc.rpc.increment_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getMapCompletionEvents_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.getMapCompletionEvents_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/stop/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.stop.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/modifyColumn_num_ops": {
+            "metric": "rpc.rpc.modifyColumn_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/checkOOME_avg_time": {
+            "metric": "rpc.rpc.checkOOME_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/next/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.next.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcSlowResponse_avg_time": {
+            "metric": "rpc.rpc.RpcSlowResponse_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getConfiguration_avg_time": {
+            "metric": "rpc.rpc.getConfiguration_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getServerName/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.getServerName.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_nice": {
+            "metric": "cpu_nice",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/unassign_avg_time": {
+            "metric": "rpc.rpc.unassign_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/NumOpenConnections": {
+            "metric": "rpc.rpc.NumOpenConnections",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/delete/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.delete.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/canCommit_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.canCommit_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/multi/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.multi.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_out": {
+            "metric": "bytes_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load/load_five": {
+            "metric": "load_five",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsReadLatencyHistogram_75th_percentile": {
+            "metric": "hbase.regionserver.fsReadLatencyHistogram_75th_percentile",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_num_ops": {
+            "metric": "regionserver.Server.Delete_num_ops",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/boottime": {
+            "metric": "boottime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/compactRegion_avg_time": {
+            "metric": "rpc.rpc.compactRegion_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsWriteLatencyHistogram_num_ops": {
+            "metric": "hbase.regionserver.fsWriteLatencyHistogram_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/writeRequestsCount": {
+            "metric": "regionserver.Server.writeRequestCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/execCoprocessor_num_ops": {
+            "metric": "rpc.rpc.execCoprocessor_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/canCommit_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.canCommit_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getHServerInfo/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.getHServerInfo.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/getRequestLatency_min": {
+            "metric": "regionserver.Server.Get_min",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/getCatalogTracker/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.getCatalogTracker.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/incrementColumnValue_avg_time": {
+            "metric": "rpc.rpc.incrementColumnValue_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_num_ops": {
+            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/deleteTable_num_ops": {
+            "metric": "rpc.rpc.deleteTable_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logError": {
+            "metric": "jvm.JvmMetrics.LogError",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops": {
+            "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/putRequestLatency_75th_percentile": {
+            "metric": "regionserver.Server.Mutate_75th_percentile",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/getFromOnlineRegions/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.getFromOnlineRegions.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getRegionInfo/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.getRegionInfo.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/blockCacheHitCount": {
+            "metric": "regionserver.Server.blockCacheHitCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/exists_avg_time": {
+            "metric": "rpc.rpc.exists_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/slowPutCount": {
+            "metric": "regionserver.Server.slowPutCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsWriteLatency_num_ops": {
+            "metric": "hbase.regionserver.fsWriteLatency_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getOnlineRegions/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.getOnlineRegions.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/exists/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.exists.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/replicateLogEntries/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.replicateLogEntries.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/delete_num_ops": {
+            "metric": "rpc.rpc.delete_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/exists_num_ops": {
+            "metric": "rpc.rpc.exists_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/regionServerStartup_avg_time": {
+            "metric": "rpc.rpc.regionServerStartup_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/checkAndDelete_num_ops": {
+            "metric": "rpc.rpc.checkAndDelete_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/closeRegion_avg_time": {
+            "metric": "rpc.rpc.closeRegion_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getBlockLocalPathInfo_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.getBlockLocalPathInfo_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getProtocolSignature_avg_time": {
+            "metric": "rpc.rpc.getProtocolSignature_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/assign_avg_time": {
+            "metric": "rpc.rpc.assign_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/compactionSize_num_ops": {
+            "metric": "hbase.regionserver.compactionSize_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/close_avg_time": {
+            "metric": "rpc.rpc.close_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/blockCacheSize": {
+            "metric": "regionserver.Server.blockCacheSize",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/putRequestLatency_num_ops": {
+            "metric": "regionserver.Server.Mutate_num_ops",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "jvm.JvmMetrics.ThreadsBlocked",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getHServerInfo_num_ops": {
+            "metric": "rpc.rpc.getHServerInfo_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/stop_avg_time": {
+            "metric": "rpc.rpc.stop_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/isStopped_num_ops": {
+            "metric": "rpc.rpc.isStopped_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/putRequestLatency_median": {
+            "metric": "regionserver.Server.Mutate_median",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsReadLatencyHistogram_num_ops": {
+            "metric": "hbase.regionserver.fsReadLatencyHistogram_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsWriteLatencyHistogram_median": {
+            "metric": "hbase.regionserver.fsWriteLatencyHistogram_median",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/isMasterRunning_avg_time": {
+            "metric": "rpc.rpc.isMasterRunning_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/incrementColumnValue_num_ops": {
+            "metric": "rpc.rpc.incrementColumnValue_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_std_dev": {
+            "metric": "hbase.regionserver.deleteRequestLatency_std_dev",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/bulkLoadHFiles/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.bulkLoadHFiles.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/hdfsBlocksLocalityIndex": {
+            "metric": "hbase.regionserver.hdfsBlocksLocalityIndex",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/readRequestsCount": {
+            "metric": "regionserver.Server.readRequestCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_free": {
+            "metric": "mem_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/execCoprocessor/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.execCoprocessor.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/putRequestLatency_min": {
+            "metric": "regionserver.Server.Mutate_min",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/storefileIndexSizeMB": {
+            "metric": "regionserver.Server.storeFileIndexSize",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/assign_num_ops": {
+            "metric": "rpc.rpc.assign_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/close/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.close.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_median": {
+            "metric": "regionserver.Server.Delete_median",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/enableTable_avg_time": {
+            "metric": "rpc.rpc.enableTable_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/putRequestLatency_mean": {
+            "metric": "regionserver.Server.Mutate_mean",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/incrementColumnValue/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.incrementColumnValue.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_out": {
+            "metric": "pkts_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/close_num_ops": {
+            "metric": "rpc.rpc.close_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_cached": {
+            "metric": "mem_cached",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getConfiguration/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.getConfiguration.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/done_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.done_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/compactionSize_avg_time": {
+            "metric": "hbase.regionserver.compactionSize_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getFromOnlineRegions_avg_time": {
+            "metric": "rpc.rpc.getFromOnlineRegions_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsReadLatencyHistogram_min": {
+            "metric": "hbase.regionserver.fsReadLatencyHistogram_min",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/increment/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.increment.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/deleteTable_avg_time": {
+            "metric": "rpc.rpc.deleteTable_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/put/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.put.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/delete_avg_time": {
+            "metric": "rpc.rpc.delete_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/statusUpdate_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.statusUpdate_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/openRegions/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.openRegions.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/compactRegion/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.compactRegion.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_avg_time": {
+            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationFailures": {
+            "metric": "rpc.rpc.rpcAuthenticationFailures",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/openScanner/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.openScanner.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getClusterStatus_avg_time": {
+            "metric": "rpc.rpc.getClusterStatus_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/unlockRow/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.unlockRow.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/removeFromOnlineRegions_avg_time": {
+            "metric": "rpc.rpc.removeFromOnlineRegions_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/put/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.put.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/modifyTable_avg_time": {
+            "metric": "rpc.rpc.modifyTable_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/checkAndPut_avg_time": {
+            "metric": "rpc.rpc.checkAndPut_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/isStopped/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.isStopped.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/put_avg_time": {
+            "metric": "rpc.rpc.put_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/blockCacheHitRatio": {
+            "metric": "hbase.regionserver.blockCacheHitRatio",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/createTable_avg_time": {
+            "metric": "rpc.rpc.createTable_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getRegionInfo/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.getRegionInfo.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsReadLatencyHistogram_std_dev": {
+            "metric": "hbase.regionserver.fsReadLatencyHistogram_std_dev",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getHTableDescriptors_num_ops": {
+            "metric": "rpc.rpc.getHTableDescriptors_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getAlterStatus_avg_time": {
+            "metric": "rpc.rpc.getAlterStatus_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getRegionInfo_num_ops": {
+            "metric": "rpc.rpc.getRegionInfo_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/statusUpdate_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.statusUpdate_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/compactRegion_num_ops": {
+            "metric": "rpc.rpc.compactRegion_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/isAborted_num_ops": {
+            "metric": "rpc.rpc.isAborted_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getFromOnlineRegions/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.getFromOnlineRegions.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsReadLatencyHistogram_max": {
+            "metric": "hbase.regionserver.fsReadLatencyHistogram_max",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/blockCacheEvictedCount": {
+            "metric": "regionserver.Server.blockCacheEvictionCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/checkOOME/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.checkOOME.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_buffers": {
+            "metric": "mem_buffers",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/disableTable_num_ops": {
+            "metric": "rpc.rpc.disableTable_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/openScanner_num_ops": {
+            "metric": "rpc.rpc.openScanner_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/bulkLoadHFiles/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.bulkLoadHFiles.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_user": {
+            "metric": "cpu_user",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_free": {
+            "metric": "swap_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/regionServerReport_num_ops": {
+            "metric": "rpc.rpc.regionServerReport_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/openRegions_avg_time": {
+            "metric": "rpc.rpc.openRegions_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/exists/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.exists.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/putRequestLatency_99th_percentile": {
+            "metric": "regionserver.Server.Mutate_99th_percentile",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/load/load_fifteen": {
+            "metric": "load_fifteen",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/execCoprocessor/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.execCoprocessor.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/isMasterRunning_num_ops": {
+            "metric": "rpc.rpc.isMasterRunning_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/balanceSwitch_num_ops": {
+            "metric": "rpc.rpc.balanceSwitch_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/offline_num_ops": {
+            "metric": "rpc.rpc.offline_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/getRequestLatency_max": {
+            "metric": "regionserver.Server.Get_max",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_shared": {
+            "metric": "mem_shared",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/abort_num_ops": {
+            "metric": "rpc.rpc.abort_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsReadLatencyHistogram_95th_percentile": {
+            "metric": "hbase.regionserver.fsReadLatencyHistogram_95th_percentile",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/blockCacheHitCachingRatio": {
+            "metric": "hbase.regionserver.blockCacheHitCachingRatio",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_avg_time": {
+            "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_num": {
+            "metric": "cpu_num",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rollHLogWriter_num_ops": {
+            "metric": "rpc.rpc.rollHLogWriter_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/openRegions_num_ops": {
+            "metric": "rpc.rpc.openRegions_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logFatal": {
+            "metric": "jvm.JvmMetrics.LogFatal",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/splitRegion_avg_time": {
+            "metric": "rpc.rpc.splitRegion_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/closeRegion/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.closeRegion.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/checkAndPut/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.checkAndPut.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/getRequestLatency_99th_percentile": {
+            "metric": "regionserver.Server.Get_99th_percentile",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsWriteLatencyHistogram_min": {
+            "metric": "hbase.regionserver.fsWriteLatencyHistogram_min",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/splitRegion/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.splitRegion.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsWriteLatencyHistogram_std_dev": {
+            "metric": "hbase.regionserver.fsWriteLatencyHistogram_std_dev",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile": {
+            "metric": "regionserver.Server.Delete_99th_percentile",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/getHServerInfo/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.getHServerInfo.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsWriteLatencyHistogram_max": {
+            "metric": "hbase.regionserver.fsWriteLatencyHistogram_max",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getTask_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.getTask_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/replicateLogEntries_num_ops": {
+            "metric": "rpc.rpc.replicateLogEntries_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/multi_avg_time": {
+            "metric": "rpc.rpc.multi_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/slowIncrementCount": {
+            "metric": "regionserver.Server.slowIncrementCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/putRequestLatency_95th_percentile": {
+            "metric": "regionserver.Server.Mutate_95th_percentile",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/compactionQueueSize": {
+            "metric": "regionserver.Server.compactionQueueLength",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/getCatalogTracker_avg_time": {
+            "metric": "rpc.rpc.getCatalogTracker_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/splitRegion_num_ops": {
+            "metric": "rpc.rpc.splitRegion_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/balance_num_ops": {
+            "metric": "rpc.rpc.balance_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/flushTime_num_ops": {
+            "metric": "hbase.regionserver.flushTime_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/shutdown_num_ops": {
+            "metric": "rpc.rpc.shutdown_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsReadLatency_num_ops": {
+            "metric": "hbase.regionserver.fsReadLatency_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/isAborted/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.isAborted.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_idle": {
+            "metric": "cpu_idle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/getRequestLatency_75th_percentile": {
+            "metric": "regionserver.Server.Get_75th_percentile",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getServerName_avg_time": {
+            "metric": "rpc.rpc.getServerName_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/compactionTime_num_ops": {
+            "metric": "hbase.regionserver.compactionTime_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/abort_avg_time": {
+            "metric": "rpc.rpc.abort_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getCatalogTracker/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.getCatalogTracker.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getBlockLocalPathInfo_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.getBlockLocalPathInfo_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/enableTable_num_ops": {
+            "metric": "rpc.rpc.enableTable_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/lockRow/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.lockRow.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/stores": {
+            "metric": "regionserver.Server.storeCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/addColumn_avg_time": {
+            "metric": "rpc.rpc.addColumn_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/addToOnlineRegions/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.addToOnlineRegions.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_num_ops": {
+            "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_wio": {
+            "metric": "cpu_wio",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getZooKeeper/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.getZooKeeper.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getServerName_num_ops": {
+            "metric": "rpc.rpc.getServerName_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getServerName/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.getServerName.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationSuccesses": {
+            "metric": "rpc.rpc.rpcAuthenticationSuccesses",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/isStopped/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.isStopped.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/disableTable_avg_time": {
+            "metric": "rpc.rpc.disableTable_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rollHLogWriter/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.rollHLogWriter.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/abort/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.abort.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/openRegion_avg_time": {
+            "metric": "rpc.rpc.openRegion_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/regionServerReport_avg_time": {
+            "metric": "rpc.rpc.regionServerReport_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getAlterStatus_num_ops": {
+            "metric": "rpc.rpc.getAlterStatus_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/flushRegion/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.flushRegion.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/incrementColumnValue/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.incrementColumnValue.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/next_avg_time": {
+            "metric": "rpc.rpc.next_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/getRequestLatency_num_ops": {
+            "metric": "regionserver.Server.Get_num_ops",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "rpc.rpc.ReceivedBytes",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/bulkLoadHFiles_num_ops": {
+            "metric": "rpc.rpc.bulkLoadHFiles_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/ping_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.ping_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsReadLatency_avg_time": {
+            "metric": "hbase.regionserver.fsReadLatency_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/flushSize_num_ops": {
+            "metric": "hbase.regionserver.flushSize_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/gcTimeMillis": {
+            "metric": "jvm.JvmMetrics.GcTimeMillis",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTerminated": {
+            "metric": "jvm.JvmMetrics.ThreadsTerminated",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.checkAndDelete.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/balanceSwitch_avg_time": {
+            "metric": "rpc.rpc.balanceSwitch_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/putRequestLatency_max": {
+            "metric": "regionserver.Server.Mutate_max",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/openRegion/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.openRegion.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/lockRow/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.lockRow.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/callQueueLen": {
+            "metric": "rpc.rpc.callQueueLen",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/openRegion_num_ops": {
+            "metric": "rpc.rpc.openRegion_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/compactRegion/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.compactRegion.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsSyncLatency_num_ops": {
+            "metric": "hbase.regionserver.fsSyncLatency_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsWriteLatencyHistogram_95th_percentile": {
+            "metric": "hbase.regionserver.fsWriteLatencyHistogram_95th_percentile",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getOnlineRegions_avg_time": {
+            "metric": "rpc.rpc.getOnlineRegions_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsWriteLatencyHistogram_75th_percentile": {
+            "metric": "hbase.regionserver.fsWriteLatencyHistogram_75th_percentile",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/move_num_ops": {
+            "metric": "rpc.rpc.move_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/stop_num_ops": {
+            "metric": "rpc.rpc.stop_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/replicateLogEntries_avg_time": {
+            "metric": "rpc.rpc.replicateLogEntries_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/getRequestLatency_mean": {
+            "metric": "regionserver.Server.Get_mean",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/get_avg_time": {
+            "metric": "rpc.rpc.get_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/multi_num_ops": {
+            "metric": "rpc.rpc.multi_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/next/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.next.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/addToOnlineRegions_avg_time": {
+            "metric": "rpc.rpc.addToOnlineRegions_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/deleteColumn_avg_time": {
+            "metric": "rpc.rpc.deleteColumn_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/regions": {
+            "metric": "regionserver.Server.regionCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/bulkLoadHFiles_avg_time": {
+            "metric": "rpc.rpc.bulkLoadHFiles_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/isAborted/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.isAborted.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/stop/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.stop.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/addToOnlineRegions_num_ops": {
+            "metric": "rpc.rpc.addToOnlineRegions_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/abort/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.abort.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.removeFromOnlineRegions.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/blockCacheFree": {
+            "metric": "regionserver.Server.blockCacheFreeSize",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/offline_avg_time": {
+            "metric": "rpc.rpc.offline_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/unlockRow_avg_time": {
+            "metric": "rpc.rpc.unlockRow_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/blockCacheMissCount": {
+            "metric": "regionserver.Server.blockCacheMissCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/getCatalogTracker_num_ops": {
+            "metric": "rpc.rpc.getCatalogTracker_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/checkOOME/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.checkOOME.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/flushQueueSize": {
+            "metric": "regionserver.Server.flushQueueLength",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/checkAndPut/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.checkAndPut.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/close/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.close.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/execCoprocessor_avg_time": {
+            "metric": "rpc.rpc.execCoprocessor_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsWriteLatencyHistogram_mean": {
+            "metric": "hbase.regionserver.fsWriteLatencyHistogram_mean",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/createTable_num_ops": {
+            "metric": "rpc.rpc.createTable_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/process/proc_run": {
+            "metric": "proc_run",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getConfiguration_num_ops": {
+            "metric": "rpc.rpc.getConfiguration_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/isStopped_avg_time": {
+            "metric": "rpc.rpc.isStopped_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "rpc.rpc.RpcQueueTimeNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rollHLogWriter_avg_time": {
+            "metric": "rpc.rpc.rollHLogWriter_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsSyncLatency_avg_time": {
+            "metric": "hbase.regionserver.fsSyncLatency_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_mean": {
+            "metric": "regionserver.Server.Delete_mean",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/addToOnlineRegions/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.addToOnlineRegions.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getMapCompletionEvents_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.getMapCompletionEvents_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_aidle": {
+            "metric": "cpu_aidle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsReadLatencyHistogram_mean": {
+            "metric": "hbase.regionserver.fsReadLatencyHistogram_mean",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/totalStaticIndexSizeKB": {
+            "metric": "regionserver.Server.staticIndexSize",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/checkAndDelete/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.checkAndDelete.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getFromOnlineRegions_num_ops": {
+            "metric": "rpc.rpc.getFromOnlineRegions_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/mutationsWithoutWALCount": {
+            "metric": "regionserver.Server.mutationsWithoutWALCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/disk/disk_total": {
+            "metric": "disk_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/get/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.get.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/getRequestLatency_median": {
+            "metric": "regionserver.Server.Get_median",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/openScanner_avg_time": {
+            "metric": "rpc.rpc.openScanner_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcSlowResponse_num_ops": {
+            "metric": "rpc.rpc.RpcSlowResponse_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/splitRegion/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.splitRegion.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/isAborted_avg_time": {
+            "metric": "rpc.rpc.isAborted_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getClosestRowBefore/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.getClosestRowBefore.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/flushRegion/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.flushRegion.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/flushSize_avg_time": {
+            "metric": "hbase.regionserver.flushSize_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/commitPending_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.commitPending_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getClosestRowBefore_avg_time": {
+            "metric": "rpc.rpc.getClosestRowBefore_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_speed": {
+            "metric": "cpu_speed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_max": {
+            "metric": "regionserver.Server.Delete_max",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/get/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.get.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/put_num_ops": {
+            "metric": "rpc.rpc.put_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/move_avg_time": {
+            "metric": "rpc.rpc.move_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/percentFilesLocal": {
+            "metric": "regionserver.Server.percentFilesLocal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsWriteLatency_avg_time": {
+            "metric": "hbase.regionserver.fsWriteLatency_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/increment/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.increment.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/openRegion/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.openRegion.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "rpc.rpc.SentBytes",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getTask_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.getTask_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/addColumn_num_ops": {
+            "metric": "rpc.rpc.addColumn_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "jvm.JvmMetrics.LogWarn",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/maxMemoryM": {
+            "metric": "jvm.metrics.maxMemoryM",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "jvm.JvmMetrics.GcCount",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getOnlineRegions_num_ops": {
+            "metric": "rpc.rpc.getOnlineRegions_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/flushTime_avg_time": {
+            "metric": "hbase.regionserver.flushTime_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/done_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.done_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getProtocolVersion_num_ops": {
+            "metric": "rpc.rpc.getProtocolVersion_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemHeapUsedM",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/unlockRow_num_ops": {
+            "metric": "rpc.rpc.unlockRow_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/disk_free": {
+            "metric": "disk_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsWaiting",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/slowGetCount": {
+            "metric": "regionserver.Server.slowGetCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/replicateLogEntries/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.replicateLogEntries.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/shutdown_avg_time": {
+            "metric": "rpc.rpc.shutdown_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/regionServerStartup_num_ops": {
+            "metric": "rpc.rpc.regionServerStartup_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_system": {
+            "metric": "cpu_system",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/requests": {
+            "metric": "regionserver.Server.totalRequestCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsReadLatencyHistogram_99th_percentile": {
+            "metric": "hbase.regionserver.fsReadLatencyHistogram_99th_percentile",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsWriteLatencyHistogram_99th_percentile": {
+            "metric": "hbase.regionserver.fsWriteLatencyHistogram_99th_percentile",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/storefiles": {
+            "metric": "regionserver.Server.storeFileCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/next_num_ops": {
+            "metric": "rpc.rpc.next_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time": {
+            "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/slowDeleteCount": {
+            "metric": "regionserver.Server.slowDeleteCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/checkAndDelete_avg_time": {
+            "metric": "rpc.rpc.checkAndDelete_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/closeRegion/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.closeRegion.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getHServerInfo_avg_time": {
+            "metric": "rpc.rpc.getHServerInfo_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logInfo": {
+            "metric": "jvm.JvmMetrics.LogInfo",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getZooKeeper_avg_time": {
+            "metric": "rpc.rpc.getZooKeeper_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/hlogFileCount": {
+            "metric": "hbase.regionserver.hlogFileCount",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/getRequestLatency_95th_percentile": {
+            "metric": "regionserver.Server.Get_95th_percentile",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_95th_percentile": {
+            "metric": "regionserver.Server.Delete_95th_percentile",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/memstoreSize": {
+            "metric": "regionserver.Server.memStoreSize",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsReadLatencyHistogram_median": {
+            "metric": "hbase.regionserver.fsReadLatencyHistogram_median",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getProtocolSignature_num_ops": {
+            "metric": "rpc.rpc.getProtocolSignature_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_75th_percentile": {
+            "metric": "regionserver.Server.Delete_75th_percentile",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationSuccesses": {
+            "metric": "rpc.rpc.rpcAuthorizationSuccesses",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/totalStaticBloomSizeKB": {
+            "metric": "regionserver.Server.staticBloomSize",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/checkAndPut_num_ops": {
+            "metric": "rpc.rpc.checkAndPut_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/increment_avg_time": {
+            "metric": "rpc.rpc.increment_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "metrics/hbase/regionserver/slowPutCount": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowPutCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/percentFilesLocal": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.percentFilesLocal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_min": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_min",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/blockCacheFree": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheFreeSize",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/mutationsWithoutWALSize": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALSize",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/blockCacheMissCount": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheMissCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/flushQueueSize": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.flushQueueLength",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_99th_percentile",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/getRequestLatency_num_ops": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/slowAppendCount": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowAppendCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/blockCacheSize": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheSize",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/putRequestLatency_num_ops": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/slowIncrementCount": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowIncrementCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/blockCacheEvictedCount": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheEvictionCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/putRequestLatency_95th_percentile": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_95th_percentile",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/compactionQueueSize": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.compactionQueueLength",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/putRequestLatency_median": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_median",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_mean": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_mean",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/slowGetCount": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowGetCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/blockCacheCount": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/getRequestLatency_75th_percentile": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_75th_percentile",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/readRequestsCount": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.readRequestCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/putRequestLatency_min": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_min",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/storefileIndexSizeMB": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileIndexSize",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_median": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_median",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/putRequestLatency_max": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_max",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/totalStaticIndexSizeKB": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticIndexSize",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_num_ops": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/putRequestLatency_mean": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_mean",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/requests": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.totalRequestCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/storefiles": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/mutationsWithoutWALCount": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/writeRequestsCount": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.writeRequestCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/getRequestLatency_median": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_median",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/slowDeleteCount": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowDeleteCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/putRequestLatency_99th_percentile": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_99th_percentile",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/stores": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/getRequestLatency_min": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_min",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/getRequestLatency_95th_percentile": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_95th_percentile",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_95th_percentile": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_95th_percentile",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/memstoreSize": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.memStoreSize",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/getRequestLatency_max": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_max",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/getRequestLatency_mean": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_mean",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_75th_percentile": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_75th_percentile",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_max": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_max",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/putRequestLatency_75th_percentile": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_75th_percentile",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/regions": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.regionCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/totalStaticBloomSizeKB": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticBloomSize",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/blockCacheHitCount": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheHitCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/getRequestLatency_99th_percentile": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_99th_percentile",
+            "pointInTime": true,
+            "temporal": false
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "metrics/hbase/regionserver/compactionTime_avg_time": {
+            "metric": "hbase.regionserver.compactionTime_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/closeRegion_num_ops": {
+            "metric": "rpc.rpc.closeRegion_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/mutationsWithoutWALSize": {
+            "metric": "regionserver.Server.mutationsWithoutWALSize",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/unassign_num_ops": {
+            "metric": "rpc.rpc.unassign_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/modifyTable_num_ops": {
+            "metric": "rpc.rpc.modifyTable_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/splitRegion/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.splitRegion.aboveOneSec._avg_time",
+            "pointInTime": tru

<TRUNCATED>
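
Each entry in the metrics definition above maps an Ambari-facing metric path to a backing source: "ganglia" blocks feed temporal (time-series) queries, "jmx" blocks feed live point-in-time reads, and the "temporal"/"pointInTime" flags advertise which query styles each mapping supports. A minimal sketch of how such a definition could be consumed (the helper and inline sample are illustrative, not Ambari API):

    import json

    def resolve(blocks, path, source_type):
        # blocks is one of the Component/HostComponent arrays above:
        # a list of {"type": "ganglia"|"jmx", "metrics": {...}} objects.
        for block in blocks:
            if block["type"] == source_type and path in block["metrics"]:
                return block["metrics"][path]["metric"]
        return None

    blocks = json.loads("""[
      {"type": "jmx", "metrics": {
        "metrics/hbase/regionserver/requests": {
          "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.totalRequestCount",
          "pointInTime": true, "temporal": false}}}
    ]""")

    print(resolve(blocks, "metrics/hbase/regionserver/requests", "jmx"))
    # Hadoop:service=HBase,name=RegionServer,sub=Server.totalRequestCount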

[03/37] ambari git commit: AMBARI-8695: Common Services: Refactor HDP-2.0.6 HDFS, ZOOKEEPER services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer.log
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer.log b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer.log
deleted file mode 100644
index 2010c02..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer.log
+++ /dev/null
@@ -1,29 +0,0 @@
-Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved
-Jul 28, 2014 5:01:49 PM           0                  0 B             5.74 GB            9.79 GB
-Jul 28, 2014 5:03:00 PM           1                  0 B             5.58 GB            9.79 GB
-Jul 28, 2014 5:04:07 PM           2                  0 B             5.40 GB            9.79 GB
-Jul 28, 2014 5:05:14 PM           3                  0 B             5.06 GB            9.79 GB
-Jul 28, 2014 5:05:50 PM           4                  0 B             5.06 GB            9.79 GB
-Jul 28, 2014 5:06:56 PM           5                  0 B             4.81 GB            9.79 GB
-Jul 28, 2014 5:07:33 PM           6                  0 B             4.80 GB            9.79 GB
-Jul 28, 2014 5:09:11 PM           7                  0 B             4.29 GB            9.79 GB
-Jul 28, 2014 5:09:47 PM           8                  0 B             4.29 GB            9.79 GB
-Jul 28, 2014 5:11:24 PM           9                  0 B             3.89 GB            9.79 GB
-Jul 28, 2014 5:12:00 PM          10                  0 B             3.86 GB            9.79 GB
-Jul 28, 2014 5:13:37 PM          11                  0 B             3.23 GB            9.79 GB
-Jul 28, 2014 5:15:13 PM          12                  0 B             2.53 GB            9.79 GB
-Jul 28, 2014 5:15:49 PM          13                  0 B             2.52 GB            9.79 GB
-Jul 28, 2014 5:16:25 PM          14                  0 B             2.51 GB            9.79 GB
-Jul 28, 2014 5:17:01 PM          15                  0 B             2.39 GB            9.79 GB
-Jul 28, 2014 5:17:37 PM          16                  0 B             2.38 GB            9.79 GB
-Jul 28, 2014 5:18:14 PM          17                  0 B             2.31 GB            9.79 GB
-Jul 28, 2014 5:18:50 PM          18                  0 B             2.30 GB            9.79 GB
-Jul 28, 2014 5:19:26 PM          19                  0 B             2.21 GB            9.79 GB
-Jul 28, 2014 5:20:02 PM          20                  0 B             2.10 GB            9.79 GB
-Jul 28, 2014 5:20:38 PM          21                  0 B             2.06 GB            9.79 GB
-Jul 28, 2014 5:22:14 PM          22                  0 B             1.68 GB            9.79 GB
-Jul 28, 2014 5:23:20 PM          23                  0 B             1.00 GB            9.79 GB
-Jul 28, 2014 5:23:56 PM          24                  0 B          1016.16 MB            9.79 GB
-Jul 28, 2014 5:25:33 PM          25                  0 B            30.55 MB            9.79 GB
-The cluster is balanced. Exiting...
-Balancing took 24.858033333333335 minutes

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py
deleted file mode 100644
index 0cce48c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-import time
-import sys
-from threading import Thread
-
-
-def write_function(path, handle, interval):
-  with open(path) as f:
-      for line in f:
-          handle.write(line)
-          handle.flush()
-          time.sleep(interval)
-          
-thread = Thread(target =  write_function, args = ('balancer.log', sys.stdout, 1.5))
-thread.start()
-
-threaderr = Thread(target =  write_function, args = ('balancer-err.log', sys.stderr, 1.5 * 0.023))
-threaderr.start()
-
-thread.join()  
-
-
-def rebalancer_out():
-  write_function('balancer.log', sys.stdout, 1.5)
-
-def rebalancer_err():
-  write_function('balancer-err.log', sys.stderr, 1.5 * 0.023)
\ No newline at end of file
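
The two deletions above retire the stack-local balancer emulator: a canned balancer.log plus a script that replays it to stdout (and a matching error log to stderr) on background threads, sleeping between lines to pace the output like a live `hdfs balancer` run. A self-contained sketch of the same replay idea, which also waits on both threads rather than only the stdout one (illustrative, not the committed file):

    import sys
    import time
    from threading import Thread

    def replay(path, handle, interval):
        # Stream a canned log line by line, flushing after each write so the
        # output appears gradually, like a real balancer run.
        with open(path) as f:
            for line in f:
                handle.write(line)
                handle.flush()
                time.sleep(interval)

    if __name__ == "__main__":
        out = Thread(target=replay, args=("balancer.log", sys.stdout, 1.5))
        err = Thread(target=replay, args=("balancer-err.log", sys.stderr, 1.5 * 0.023))
        out.start()
        err.start()
        out.join()
        err.join()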

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/datanode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/datanode.py
deleted file mode 100644
index 4afd4ce..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/datanode.py
+++ /dev/null
@@ -1,88 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import datanode_upgrade
-from hdfs_datanode import datanode
-from resource_management import *
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
-from hdfs import hdfs
-
-
-class DataNode(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env, params.exclude_packages)
-    env.set_params(params)
-
-
-  def pre_rolling_restart(self, env):
-    Logger.info("Executing DataNode Rolling Upgrade pre-restart")
-    import params
-    env.set_params(params)
-
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
-      Execute(format("hdp-select set hadoop-hdfs-datanode {version}"))
-
-
-  def post_rolling_restart(self, env):
-    Logger.info("Executing DataNode Rolling Upgrade post-restart")
-    import params
-    env.set_params(params)
-
-    # ensure the DataNode has started and rejoined the cluster
-    datanode_upgrade.post_upgrade_check()
-
-
-  def start(self, env, rolling_restart=False):
-    import params
-
-    env.set_params(params)
-    self.configure(env)
-    datanode(action="start")
-
-
-  def stop(self, env, rolling_restart=False):
-    import params
-
-    env.set_params(params)
-
-    # pre-upgrade steps shutdown the datanode, so there's no need to call
-    # action=stop
-    if rolling_restart:
-      datanode_upgrade.pre_upgrade_shutdown()
-    else:
-      datanode(action="stop")
-
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    hdfs()
-    datanode(action="configure")
-
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.datanode_pid_file)
-
-
-if __name__ == "__main__":
-  DataNode().execute()
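
datanode.py above follows the resource_management Script contract used by every component script in this refactor: Ambari invokes the script with a command name, and Script.execute() dispatches to the matching method (install, configure, start, stop, status, plus the pre/post rolling-restart hooks). A toy dispatcher showing the shape of that contract (an assumption for illustration; the real Script.execute() also parses the command JSON and sets up logging):

    import sys

    class Script(object):
        # Stand-in for resource_management's Script: dispatch the command named
        # on the command line to the method of the same name.
        def execute(self):
            command = sys.argv[1] if len(sys.argv) > 1 else "status"
            getattr(self, command)()

    class DataNode(Script):
        def configure(self):
            print("render hdfs-site.xml / core-site.xml, prepare data dirs")

        def start(self):
            self.configure()
            print("start the datanode daemon")

        def stop(self):
            print("stop the datanode daemon")

        def status(self):
            print("check the datanode pid file")

    if __name__ == "__main__":
        DataNode().execute()   # e.g. `python datanode.py start`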

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/datanode_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/datanode_upgrade.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/datanode_upgrade.py
deleted file mode 100644
index 88af1f9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/datanode_upgrade.py
+++ /dev/null
@@ -1,114 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.core.logger import Logger
-from resource_management.core.exceptions import Fail
-from resource_management.core.resources.system import Execute
-from resource_management.core.shell import call
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.decorator import retry
-
-
-def pre_upgrade_shutdown():
-  """
-  Runs the "shutdownDatanode {ipc_address} upgrade" command to shutdown the
-  DataNode in preparation for an upgrade. This will then periodically check
-  "getDatanodeInfo" to ensure the DataNode has shutdown correctly.
-  This function will obtain the Kerberos ticket if security is enabled.
-  :return:
-  """
-  import params
-
-  Logger.info('DataNode executing "shutdownDatanode" command in preparation for upgrade...')
-  if params.security_enabled:
-    Execute(params.dn_kinit_cmd, user = params.hdfs_user)
-
-  command = format('hdfs dfsadmin -shutdownDatanode {dfs_dn_ipc_address} upgrade')
-  Execute(command, user=params.hdfs_user, tries=1 )
-
-  # verify that the datanode is down
-  _check_datanode_shutdown()
-
-
-def post_upgrade_check():
-  """
-  Verifies that the DataNode has rejoined the cluster. This function will
-  obtain the Kerberos ticket if security is enabled.
-  :return:
-  """
-  import params
-
-  Logger.info("Checking that the DataNode has rejoined the cluster after upgrade...")
-  if params.security_enabled:
-    Execute(params.dn_kinit_cmd,user = params.hdfs_user)
-
-  # verify that the datanode has started and rejoined the HDFS cluster
-  _check_datanode_startup()
-
-
-@retry(times=12, sleep_time=10, err_class=Fail)
-def _check_datanode_shutdown():
-  """
-  Checks that a DataNode is down by running "hdfs dfsadmin getDatanodeInfo"
-  several times, pausing in between runs. Once the DataNode stops responding
-  this method will return, otherwise it will raise a Fail(...) and retry
-  automatically.
-  :return:
-  """
-  import params
-
-  command = format('hdfs dfsadmin -getDatanodeInfo {dfs_dn_ipc_address}')
-
-  try:
-    Execute(command, user=params.hdfs_user, tries=1)
-  except:
-    Logger.info("DataNode has successfully shutdown for upgrade.")
-    return
-
-  Logger.info("DataNode has not shutdown.")
-  raise Fail('DataNode has not shutdown.')
-
-
-@retry(times=12, sleep_time=10, err_class=Fail)
-def _check_datanode_startup():
-  """
-  Checks that a DataNode is reported as being alive via the
-  "hdfs dfsadmin -report -live" command. Once the DataNode is found to be
-  alive this method will return, otherwise it will raise a Fail(...) and retry
-  automatically.
-  :return:
-  """
-  import params
-
-  try:
-    # 'su - hdfs -c "hdfs dfsadmin -report -live"'
-    command = 'hdfs dfsadmin -report -live'
-    return_code, hdfs_output = call(command, user=params.hdfs_user)
-  except:
-    raise Fail('Unable to determine if the DataNode has started after upgrade.')
-
-  if return_code == 0:
-    if params.hostname.lower() in hdfs_output.lower():
-      Logger.info("DataNode {0} reports that it has rejoined the cluster.".format(params.hostname))
-      return
-    else:
-      raise Fail("DataNode {0} was not found in the list of live DataNodes".format(params.hostname))
-
-  # return_code is not 0, fail
-  raise Fail("Unable to determine if the DataNode has started after upgrade (result code {0})".format(str(return_code)))

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs.py
deleted file mode 100644
index 25c1067..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs.py
+++ /dev/null
@@ -1,83 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import sys
-import os
-
-
-def hdfs(name=None):
-  import params
-
-  # On some OSes this folder may not exist, so create it before placing files there
-  Directory(params.limits_conf_dir,
-            recursive=True,
-            owner='root',
-            group='root'
-  )
-
-  File(os.path.join(params.limits_conf_dir, 'hdfs.conf'),
-       owner='root',
-       group='root',
-       mode=0644,
-       content=Template("hdfs.conf.j2")
-  )
-
-  if params.security_enabled:
-    tc_mode = 0644
-    tc_owner = "root"
-  else:
-    tc_mode = None
-    tc_owner = params.hdfs_user
-
-  if "hadoop-policy" in params.config['configurations']:
-    XmlConfig("hadoop-policy.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations']['hadoop-policy'],
-              configuration_attributes=params.config['configuration_attributes']['hadoop-policy'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-
-  XmlConfig("hdfs-site.xml",
-            conf_dir=params.hadoop_conf_dir,
-            configurations=params.config['configurations']['hdfs-site'],
-            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
-            owner=params.hdfs_user,
-            group=params.user_group
-  )
-
-  XmlConfig("core-site.xml",
-            conf_dir=params.hadoop_conf_dir,
-            configurations=params.config['configurations']['core-site'],
-            configuration_attributes=params.config['configuration_attributes']['core-site'],
-            owner=params.hdfs_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  File(os.path.join(params.hadoop_conf_dir, 'slaves'),
-       owner=tc_owner,
-       content=Template("slaves.j2")
-  )
-  
-  if params.lzo_enabled:
-    Package(params.lzo_packages_for_current_host)
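
The XmlConfig resources above render the configuration dictionaries from the command JSON into Hadoop *-site.xml files with the given owner, group, and mode. The output is the standard Hadoop configuration layout; a rough stand-in for what gets written (the helper is illustrative, not XmlConfig itself):

    import xml.etree.ElementTree as ET

    def write_site_xml(path, properties):
        # Standard Hadoop layout: <configuration> holding <property> elements,
        # each with a <name> and a <value> child.
        root = ET.Element("configuration")
        for name, value in sorted(properties.items()):
            prop = ET.SubElement(root, "property")
            ET.SubElement(prop, "name").text = name
            ET.SubElement(prop, "value").text = str(value)
        ET.ElementTree(root).write(path, encoding="utf-8", xml_declaration=True)

    write_site_xml("hdfs-site.xml", {"dfs.replication": 3})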

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_client.py
deleted file mode 100644
index b9f244a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_client.py
+++ /dev/null
@@ -1,53 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs import hdfs
-from utils import service
-
-
-class HdfsClient(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env, params.exclude_packages)
-    env.set_params(params)
-    self.config(env)
-
-  def start(self, env, rolling_restart=False):
-    import params
-
-    env.set_params(params)
-
-  def stop(self, env, rolling_restart=False):
-    import params
-
-    env.set_params(params)
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-  def config(self, env):
-    import params
-    hdfs()
-    pass
-
-
-if __name__ == "__main__":
-  HdfsClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_datanode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_datanode.py
deleted file mode 100644
index d432d88..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_datanode.py
+++ /dev/null
@@ -1,62 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.libraries.functions.dfs_datanode_helper import handle_dfs_data_dir
-from utils import service
-
-
-def create_dirs(data_dir, params):
-  """
-  :param data_dir: The directory to create
-  :param params: parameters
-  """
-  Directory(data_dir,
-            recursive=True,
-            recursive_permission=True,
-            mode=0755,
-            owner=params.hdfs_user,
-            group=params.user_group,
-            ignore_failures=True
-  )
-
-
-def datanode(action=None):
-  import params
-  if action == "configure":
-    Directory(params.dfs_domain_socket_dir,
-              recursive=True,
-              mode=0751,
-              owner=params.hdfs_user,
-              group=params.user_group)
-
-    handle_dfs_data_dir(create_dirs, params)
-
-  elif action == "start" or action == "stop":
-    Directory(params.hadoop_pid_dir_prefix,
-              mode=0755,
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-    service(
-      action=action, name="datanode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True
-    )
\ No newline at end of file
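
handle_dfs_data_dir above applies the create_dirs callback to each entry of the comma-separated dfs.datanode.data.dir value (the real helper also layers mount-point handling on top, which this sketch omits):

    import os

    def for_each_data_dir(data_dirs, action):
        # dfs.datanode.data.dir is a comma-separated list of directories.
        for d in data_dirs.split(","):
            d = d.strip()
            if d:
                action(d)

    for_each_data_dir("/grid/0/hdfs/data, /grid/1/hdfs/data",
                      lambda d: os.path.isdir(d) or os.makedirs(d))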

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py
deleted file mode 100644
index 9f470a3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py
+++ /dev/null
@@ -1,202 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.core.exceptions import ComponentIsNotRunning
-
-from utils import service, safe_zkfc_op
-
-
-def namenode(action=None, do_format=True, rolling_restart=False, env=None):
-  import params
-  # we need this directory to be present before any action (HA manual steps
-  # for an additional namenode)
-  if action == "configure":
-    create_name_dirs(params.dfs_name_dir)
-
-  if action == "start":
-    if do_format:
-      format_namenode()
-      pass
-
-    File(params.exclude_file_path,
-         content=Template("exclude_hosts_list.j2"),
-         owner=params.hdfs_user,
-         group=params.user_group
-    )
-
-    Directory(params.hadoop_pid_dir_prefix,
-              mode=0755,
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-
-    options = "-rollingUpgrade started" if rolling_restart else ""
-
-    service(
-      action="start",
-      name="namenode",
-      user=params.hdfs_user,
-      options=options,
-      create_pid_dir=True,
-      create_log_dir=True
-    )
-
-    if rolling_restart:    
-      # Must start Zookeeper Failover Controller if it exists on this host because it could have been killed in order to initiate the failover.
-      safe_zkfc_op(action, env)
-
-    if params.security_enabled:
-      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
-              user = params.hdfs_user)
-
-    if params.dfs_ha_enabled:
-      dfs_check_nn_status_cmd = as_user(format("hdfs --config {hadoop_conf_dir} haadmin -getServiceState {namenode_id} | grep active"), params.hdfs_user, env={'PATH':params.hadoop_bin_dir})
-    else:
-      dfs_check_nn_status_cmd = None
-
-    namenode_safe_mode_off = format("hadoop dfsadmin -safemode get | grep 'Safe mode is OFF'")
-
-    # If HA is enabled and it is in standby, then stay in safemode, otherwise, leave safemode.
-    leave_safe_mode = True
-    if dfs_check_nn_status_cmd is not None:
-      code, out = shell.call(dfs_check_nn_status_cmd) # If active NN, code will be 0
-      if code != 0:
-        leave_safe_mode = False
-
-    if leave_safe_mode:
-      # If the NameNode does not already report 'safemode OFF' (i.e. safemode is ON), tell it to leave safemode
-      code, out = shell.call(namenode_safe_mode_off)
-      if code != 0:
-        leave_safe_mode_cmd = format("hdfs --config {hadoop_conf_dir} dfsadmin -safemode leave")
-        Execute(leave_safe_mode_cmd,
-                user=params.hdfs_user,
-                path=[params.hadoop_bin_dir],
-        )
-
-    # Verify if Namenode should be in safemode OFF
-    Execute(namenode_safe_mode_off,
-            tries=40,
-            try_sleep=10,
-            path=[params.hadoop_bin_dir],
-            user=params.hdfs_user,
-            only_if=dfs_check_nn_status_cmd #skip when HA not active
-    )
-    create_hdfs_directories(dfs_check_nn_status_cmd)
-
-  if action == "stop":
-    service(
-      action="stop", name="namenode", 
-      user=params.hdfs_user
-    )
-
-  if action == "decommission":
-    decommission()
-
-def create_name_dirs(directories):
-  import params
-
-  dirs = directories.split(",")
-  Directory(dirs,
-            mode=0755,
-            owner=params.hdfs_user,
-            group=params.user_group,
-            recursive=True,
-            recursive_permission=True
-  )
-
-
-def create_hdfs_directories(check):
-  import params
-
-  params.HdfsDirectory("/tmp",
-                       action="create_delayed",
-                       owner=params.hdfs_user,
-                       mode=0777
-  )
-  params.HdfsDirectory(params.smoke_hdfs_user_dir,
-                       action="create_delayed",
-                       owner=params.smoke_user,
-                       mode=params.smoke_hdfs_user_mode
-  )
-  params.HdfsDirectory(None, action="create",
-                       only_if=check #skip creation when HA not active
-  )
-
-def format_namenode(force=None):
-  import params
-
-  old_mark_dir = params.namenode_formatted_old_mark_dir
-  mark_dir = params.namenode_formatted_mark_dir
-  dfs_name_dir = params.dfs_name_dir
-  hdfs_user = params.hdfs_user
-  hadoop_conf_dir = params.hadoop_conf_dir
-
-  if not params.dfs_ha_enabled:
-    if force:
-      ExecuteHadoop('namenode -format',
-                    kinit_override=True,
-                    bin_dir=params.hadoop_bin_dir,
-                    conf_dir=hadoop_conf_dir)
-    else:
-      File(format("{tmp_dir}/checkForFormat.sh"),
-           content=StaticFile("checkForFormat.sh"),
-           mode=0755)
-      Execute(format(
-        "{tmp_dir}/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} "
-        "{hadoop_bin_dir} {old_mark_dir} {mark_dir} {dfs_name_dir}"),
-              not_if=format("test -d {old_mark_dir} || test -d {mark_dir}"),
-              path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin"
-      )
-    
-      Directory(mark_dir,
-        recursive = True
-      )
-
-
-def decommission():
-  import params
-
-  hdfs_user = params.hdfs_user
-  conf_dir = params.hadoop_conf_dir
-  user_group = params.user_group
-  nn_kinit_cmd = params.nn_kinit_cmd
-  
-  File(params.exclude_file_path,
-       content=Template("exclude_hosts_list.j2"),
-       owner=hdfs_user,
-       group=user_group
-  )
-  
-  if not params.update_exclude_file_only:
-    Execute(nn_kinit_cmd,
-            user=hdfs_user
-    )
-
-    if params.dfs_ha_enabled:
-      # due to a bug in hdfs, refreshNodes will not run on both namenodes so we
-      # need to execute each command scoped to a particular namenode
-      nn_refresh_cmd = format('dfsadmin -fs hdfs://{namenode_rpc} -refreshNodes')
-    else:
-      nn_refresh_cmd = format('dfsadmin -refreshNodes')
-    ExecuteHadoop(nn_refresh_cmd,
-                  user=hdfs_user,
-                  conf_dir=conf_dir,
-                  kinit_override=True,
-                  bin_dir=params.hadoop_bin_dir)
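
The start path above reduces to: work out whether this NameNode is the active one (under HA), ask it to leave safemode if it still reports safemode ON, then poll `dfsadmin -safemode get` (40 tries, 10 seconds apart) until it prints 'Safe mode is OFF'. The polling step, re-expressed with subprocess for illustration (the committed code drives it through Execute with tries/try_sleep and an only_if guard):

    import subprocess
    import time

    def wait_for_safemode_off(tries=40, try_sleep=10):
        for _ in range(tries):
            out = subprocess.run(["hdfs", "dfsadmin", "-safemode", "get"],
                                 capture_output=True, text=True).stdout
            if "Safe mode is OFF" in out:
                return True
            time.sleep(try_sleep)
        return False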

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_rebalance.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_rebalance.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_rebalance.py
deleted file mode 100644
index 1dc545e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_rebalance.py
+++ /dev/null
@@ -1,130 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import re
-
-class HdfsParser():
-  def __init__(self):
-    self.initialLine = None
-    self.state = None
-  
-  def parseLine(self, line):
-    hdfsLine = HdfsLine()
-    type, matcher = hdfsLine.recognizeType(line)
-    if(type == HdfsLine.LineType.HeaderStart):
-      self.state = 'PROCESS_STARTED'
-    elif (type == HdfsLine.LineType.Progress):
-      self.state = 'PROGRESS'
-      hdfsLine.parseProgressLog(line, matcher)
-      if(self.initialLine == None): self.initialLine = hdfsLine
-      
-      return hdfsLine 
-    elif (type == HdfsLine.LineType.ProgressEnd):
-      self.state = 'PROCESS_FINISHED'
-    return None
-    
-class HdfsLine():
-  
-  class LineType:
-    HeaderStart, Progress, ProgressEnd, Unknown = range(4)
-  
-  
-  MEMORY_SUFFIX = ['B','KB','MB','GB','TB','PB','EB']
-  MEMORY_PATTERN = '(?P<memmult_%d>(?P<memory_%d>(\d+)(.|,)?(\d+)?) (?P<mult_%d>'+"|".join(MEMORY_SUFFIX)+'))'
-  
-  HEADER_BEGIN_PATTERN = re.compile('Time Stamp\w+Iteration#\w+Bytes Already Moved\w+Bytes Left To Move\w+Bytes Being Moved')
-  PROGRESS_PATTERN = re.compile(
-                            "(?P<date>.*?)\s+" + 
-                            "(?P<iteration>\d+)\s+" + 
-                            MEMORY_PATTERN % (1,1,1) + "\s+" + 
-                            MEMORY_PATTERN % (2,2,2) + "\s+" +
-                            MEMORY_PATTERN % (3,3,3)
-                            )
-  PROGRESS_END_PATTERN = re.compile('The cluster is balanced. Exiting...')
-  
-  def __init__(self):
-    self.date = None
-    self.iteration = None
-    self.bytesAlreadyMoved = None 
-    self.bytesLeftToMove = None
-    self.bytesBeingMoved = None 
-    self.bytesAlreadyMovedStr = None 
-    self.bytesLeftToMoveStr = None
-    self.bytesBeingMovedStr = None 
-  
-  def recognizeType(self, line):
-    for (type, pattern) in (
-                            (HdfsLine.LineType.HeaderStart, self.HEADER_BEGIN_PATTERN),
-                            (HdfsLine.LineType.Progress, self.PROGRESS_PATTERN), 
-                            (HdfsLine.LineType.ProgressEnd, self.PROGRESS_END_PATTERN)
-                            ):
-      m = re.match(pattern, line)
-      if m:
-        return type, m
-    return HdfsLine.LineType.Unknown, None
-    
-  def parseProgressLog(self, line, m):
-    '''
-    Parse the line of 'hdfs rebalancer' output. The example output being parsed:
-    
-    Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved
-    Jul 28, 2014 5:01:49 PM           0                  0 B             5.74 GB            9.79 GB
-    Jul 28, 2014 5:03:00 PM           1                  0 B             5.58 GB            9.79 GB
-    
-    Throws AmbariException in case of parsing errors
-
-    '''
-    m = re.match(self.PROGRESS_PATTERN, line)
-    if m:
-      self.date = m.group('date') 
-      self.iteration = int(m.group('iteration'))
-       
-      self.bytesAlreadyMoved = self.parseMemory(m.group('memory_1'), m.group('mult_1')) 
-      self.bytesLeftToMove = self.parseMemory(m.group('memory_2'), m.group('mult_2')) 
-      self.bytesBeingMoved = self.parseMemory(m.group('memory_3'), m.group('mult_3'))
-       
-      self.bytesAlreadyMovedStr = m.group('memmult_1') 
-      self.bytesLeftToMoveStr = m.group('memmult_2')
-      self.bytesBeingMovedStr = m.group('memmult_3') 
-    else:
-      raise AmbariException("Failed to parse line [%s]") 
-  
-  def parseMemory(self, memorySize, multiplier_type):
-    try:
-      factor = self.MEMORY_SUFFIX.index(multiplier_type)
-    except ValueError:
-      raise AmbariException("Failed to memory value [%s %s]" % (memorySize, multiplier_type))
-    
-    return float(memorySize) * (1024 ** factor)
-  def toJson(self):
-    return {
-            'timeStamp' : self.date,
-            'iteration' : self.iteration,
-            
-            'dataMoved': self.bytesAlreadyMovedStr,
-            'dataLeft' : self.bytesLeftToMoveStr,
-            'dataBeingMoved': self.bytesBeingMovedStr,
-            
-            'bytesMoved': self.bytesAlreadyMoved,
-            'bytesLeft' : self.bytesLeftToMove,
-            'bytesBeingMoved': self.bytesBeingMoved,
-          }
-  def __str__(self):
-    return "[ date=%s,iteration=%d, bytesAlreadyMoved=%d, bytesLeftToMove=%d, bytesBeingMoved=%d]"%(self.date, self.iteration, self.bytesAlreadyMoved, self.bytesLeftToMove, self.bytesBeingMoved)
\ No newline at end of file
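
parseMemory() above converts a size such as '5.74 GB' into bytes as value * 1024**k, where k is the suffix's index in MEMORY_SUFFIX (B=0, KB=1, MB=2, GB=3, ...). A worked check of that formula:

    SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']

    def parse_memory(value, suffix):
        return float(value) * (1024 ** SUFFIXES.index(suffix))

    assert parse_memory('5.74', 'GB') == 5.74 * 1024 ** 3      # ~6.16e9 bytes
    assert parse_memory('1016.16', 'MB') == 1016.16 * 1024 ** 2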

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_snamenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_snamenode.py
deleted file mode 100644
index c650c4d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_snamenode.py
+++ /dev/null
@@ -1,51 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-from utils import hdfs_directory
-
-
-def snamenode(action=None, format=False):
-  import params
-
-  if action == "configure":
-    Directory(params.fs_checkpoint_dir,
-              recursive=True,
-              recursive_permission=True,
-              mode=0755,
-              owner=params.hdfs_user,
-              group=params.user_group)
-    File(params.exclude_file_path,
-         content=Template("exclude_hosts_list.j2"),
-         owner=params.hdfs_user,
-         group=params.user_group)
-  elif action == "start" or action == "stop":
-    Directory(params.hadoop_pid_dir_prefix,
-              mode=0755,
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-    service(
-      action=action,
-      name="secondarynamenode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True
-    )

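The Directory resources in hdfs_snamenode.py above declaratively ensure the checkpoint and pid directories exist with the right mode and ownership. A rough imperative sketch of the same guarantee, assuming a POSIX system and sufficient privileges (the user and group names are hypothetical stand-ins for params values):

    import os
    import pwd
    import grp

    def ensure_directory(path, mode=0o755, owner='hdfs', group='hadoop'):
        # Roughly what Directory(..., recursive=True) provides; requires
        # privileges to chown. 'hdfs'/'hadoop' are hypothetical defaults.
        if not os.path.isdir(path):
            os.makedirs(path, mode)
        os.chmod(path, mode)
        os.chown(path, pwd.getpwnam(owner).pw_uid, grp.getgrnam(group).gr_gid)
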
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/journalnode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/journalnode.py
deleted file mode 100644
index f664bcd..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/journalnode.py
+++ /dev/null
@@ -1,90 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
-from resource_management.libraries.functions.format import format
-
-from utils import service
-from hdfs import hdfs
-
-
-class JournalNode(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env, params.exclude_packages)
-    env.set_params(params)
-
-  def pre_rolling_restart(self, env):
-    Logger.info("Executing Rolling Upgrade pre-restart")
-    import params
-    env.set_params(params)
-
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
-      Execute(format("hdp-select set hadoop-hdfs-journalnode {version}"))
-
-  def start(self, env, rolling_restart=False):
-    import params
-
-    env.set_params(params)
-    self.configure(env)
-    Directory(params.hadoop_pid_dir_prefix,
-              mode=0755,
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-    service(
-      action="start", name="journalnode", user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True
-    )
-
-  def stop(self, env, rolling_restart=False):
-    import params
-
-    env.set_params(params)
-    service(
-      action="stop", name="journalnode", user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True
-    )
-
-  def configure(self, env):
-    import params
-
-    Directory(params.jn_edits_dir,
-              recursive=True,
-              recursive_permission=True,
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-    env.set_params(params)
-    hdfs()
-    pass
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.journalnode_pid_file)
-
-
-if __name__ == "__main__":
-  JournalNode().execute()

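pre_rolling_restart() above only issues hdp-select when the target version is 2.2.0.0 or later. A standalone sketch of that gate, assuming compare_versions() behaves like a numeric comparison of dotted version tuples (the real helper in resource_management may normalize versions differently):

    def version_tuple(v):
        # '2.2.0.0' -> (2, 2, 0, 0); assumes purely numeric dotted versions
        return tuple(int(part) for part in v.split('.'))

    def should_run_hdp_select(version):
        return version is not None and version_tuple(version) >= version_tuple('2.2.0.0')

    print(should_run_hdp_select('2.2.1.0'))  # True
    print(should_run_hdp_select('2.1.7.0'))  # False
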
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
deleted file mode 100644
index c8a460f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
+++ /dev/null
@@ -1,162 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-import os
-import json
-import subprocess
-from datetime import datetime
-
-from resource_management import *
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.check_process_status import check_process_status
-
-from hdfs_namenode import namenode
-from hdfs import hdfs
-import hdfs_rebalance
-from utils import failover_namenode
-
-
-class NameNode(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env, params.exclude_packages)
-    env.set_params(params)
-    #TODO we need this for HA because of manual steps
-    self.configure(env)
-
-  def pre_rolling_restart(self, env):
-    Logger.info("Executing Rolling Upgrade pre-restart")
-    import params
-    env.set_params(params)
-
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
-      Execute(format("hdp-select set hadoop-hdfs-namenode {version}"))
-
-  def start(self, env, rolling_restart=False):
-    import params
-
-    env.set_params(params)
-    self.configure(env)
-    namenode(action="start", rolling_restart=rolling_restart, env=env)
-
-  def post_rolling_restart(self, env):
-    Logger.info("Executing Rolling Upgrade post-restart")
-    import params
-    env.set_params(params)
-
-    Execute("hdfs dfsadmin -report -live",
-            user=params.hdfs_principal_name if params.security_enabled else params.hdfs_user
-    )
-
-  def stop(self, env, rolling_restart=False):
-    import params
-    env.set_params(params)
-
-    if rolling_restart and params.dfs_ha_enabled:
-      if params.dfs_ha_automatic_failover_enabled:
-        failover_namenode()
-      else:
-        raise Fail("Rolling Upgrade - dfs.ha.automatic-failover.enabled must be enabled to perform a rolling restart")
-
-    namenode(action="stop", rolling_restart=rolling_restart, env=env)
-
-  def configure(self, env):
-    import params
-
-    env.set_params(params)
-    hdfs()
-    namenode(action="configure", env=env)
-    pass
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.namenode_pid_file)
-    pass
-
-  def decommission(self, env):
-    import params
-
-    env.set_params(params)
-    namenode(action="decommission")
-    pass
-  
-    
-  def rebalancehdfs(self, env):
-    import params
-    env.set_params(params)
-
-    name_node_parameters = json.loads( params.name_node_params )
-    threshold = name_node_parameters['threshold']
-    _print("Starting balancer with threshold = %s\n" % threshold)
-    
-    def calculateCompletePercent(first, current):
-      return 1.0 - current.bytesLeftToMove/first.bytesLeftToMove
-    
-    
-    def startRebalancingProcess(threshold):
-      rebalanceCommand = format('hdfs --config {hadoop_conf_dir} balancer -threshold {threshold}')
-      return as_user(rebalanceCommand, params.hdfs_user, env={'PATH': params.hadoop_bin_dir})
-    
-    command = startRebalancingProcess(threshold)
-    
-    basedir = os.path.join(env.config.basedir, 'scripts')
-    if threshold == 'DEBUG':  # FIXME: debug-only emulator path, remove before production
-      basedir = os.path.join(env.config.basedir, 'scripts', 'balancer-emulator')
-      command = 'python hdfs-command.py'  # a plain string, since Popen below uses shell=True
-    
-    _print("Executing command %s\n" % command)
-    
-    parser = hdfs_rebalance.HdfsParser()
-    proc = subprocess.Popen(
-                            command, 
-                            stdout=subprocess.PIPE,
-                            stderr=subprocess.PIPE,
-                            shell=True,
-                            close_fds=True,
-                            cwd=basedir
-                           )
-    for line in iter(proc.stdout.readline, ''):
-      _print('[balancer] %s %s' % (str(datetime.now()), line ))
-      pl = parser.parseLine(line)
-      if pl:
-        res = pl.toJson()
-        res['completePercent'] = calculateCompletePercent(parser.initialLine, pl) 
-        
-        self.put_structured_out(res)
-      elif parser.state == 'PROCESS_FINISED' : 
-        _print('[balancer] %s %s' % (str(datetime.now()), 'Process is finished' ))
-        self.put_structured_out({'completePercent' : 1})
-        break
-    
-    proc.stdout.close()
-    proc.wait()
-    if proc.returncode is not None and proc.returncode != 0:
-      raise Fail('HDFS rebalance process exited with an error. See the log output')
-      
-def _print(line):
-  sys.stdout.write(line)
-  sys.stdout.flush()
-
-if __name__ == "__main__":
-  NameNode().execute()

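calculateCompletePercent() in rebalancehdfs() above reports progress as the fraction of the initially reported 'Bytes Left To Move' that has since been moved. A worked sketch using the two sample lines from the parser docstring (5.74 GB left initially, 5.58 GB left at iteration 1):

    GB = 1024 ** 3

    def complete_percent(first_left, current_left):
        # 0.0 at the first sample, 1.0 once nothing is left to move
        return 1.0 - current_left / first_left

    print(round(complete_percent(5.74 * GB, 5.58 * GB), 4))  # 0.0279
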
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
deleted file mode 100644
index 12353de..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
+++ /dev/null
@@ -1,289 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
-from resource_management import *
-import status_params
-import utils
-import os
-import itertools
-import re
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
-
-# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
-version = default("/commandParams/version", None)
-
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-hdfs_user = status_params.hdfs_user
-hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
-
-# Some datanode settings
-dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
-dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
-dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
-dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
-dfs_dn_ipc_address = config['configurations']['hdfs-site']['dfs.datanode.ipc.address']
-secure_dn_ports_are_in_use = False
-
-#hadoop params
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
-  mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
-  hadoop_libexec_dir = "/usr/hdp/current/hadoop-client/libexec"
-  hadoop_bin = "/usr/hdp/current/hadoop-client/sbin"    # TODO Rolling Upgrade, switch from hadoop-client to server when starting daemon.
-  hadoop_bin_dir = "/usr/hdp/current/hadoop-client/bin"
-  hadoop_home = "/usr/hdp/current/hadoop-client"
-  if not security_enabled:
-    hadoop_secure_dn_user = '""'
-  else:
-    dfs_dn_port = utils.get_port(dfs_dn_addr)
-    dfs_dn_http_port = utils.get_port(dfs_dn_http_addr)
-    dfs_dn_https_port = utils.get_port(dfs_dn_https_addr)
-    # Avoid a situation where the datanode cannot be started as a non-root user because root-owned (privileged) ports are configured
-    if dfs_http_policy == "HTTPS_ONLY":
-      secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_https_port)
-    elif dfs_http_policy == "HTTP_AND_HTTPS":
-      secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_http_port) or utils.is_secure_port(dfs_dn_https_port)
-    else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
-      secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_http_port)
-    if secure_dn_ports_are_in_use:
-      hadoop_secure_dn_user = hdfs_user
-    else:
-      hadoop_secure_dn_user = '""'
-else:
-  mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-  hadoop_bin = "/usr/lib/hadoop/sbin"
-  hadoop_bin_dir = "/usr/bin"
-  hadoop_home = "/usr/lib/hadoop"
-  hadoop_secure_dn_user = hdfs_user
-
-hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
-limits_conf_dir = "/etc/security/limits.d"
-
-execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir
-ulimit_cmd = "ulimit -c unlimited && "
-
-#security params
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-falcon_user = config['configurations']['falcon-env']['falcon_user']
-
-#exclude file
-hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
-exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
-update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
-
-kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-#hosts
-hostname = config["hostname"]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-nm_host = default("/clusterHostInfo/nm_host", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
-zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
-falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
-
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_namenodes = not len(namenode_host) == 0
-has_jobtracker = not len(jtnode_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_histroryserver = not len(hs_host) == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_slaves = not len(slave_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_journalnode_hosts = not len(journalnode_hosts)  == 0
-has_zkfc_hosts = not len(zkfc_hosts)  == 0
-has_falcon_host = not len(falcon_host)  == 0
-
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-#users and groups
-yarn_user = config['configurations']['yarn-env']['yarn_user']
-hbase_user = config['configurations']['hbase-env']['hbase_user']
-oozie_user = config['configurations']['oozie-env']['oozie_user']
-webhcat_user = config['configurations']['hive-env']['hcat_user']
-hcat_user = config['configurations']['hive-env']['hcat_user']
-hive_user = config['configurations']['hive-env']['hive_user']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
-mapred_user = config['configurations']['mapred-env']['mapred_user']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-
-user_group = config['configurations']['cluster-env']['user_group']
-proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
-
-#hadoop params
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
-
-dfs_domain_socket_path = config['configurations']['hdfs-site']['dfs.domain.socket.path']
-dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
-
-jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']
-
-dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
-
-namenode_dirs_created_stub_dir = format("{hdfs_log_dir_prefix}/{hdfs_user}")
-namenode_dirs_stub_filename = "namenode_dirs_created"
-
-smoke_hdfs_user_dir = format("/user/{smoke_user}")
-smoke_hdfs_user_mode = 0770
-
-namenode_formatted_old_mark_dir = format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted/")
-namenode_formatted_mark_dir = format("/var/lib/hdfs/namenode/formatted/")
-
-fs_checkpoint_dir = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir']
-
-dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
-dfs_data_dir = ",".join([re.sub(r'^\[.+\]', '', dfs_dir.strip()) for dfs_dir in dfs_data_dir.split(",")])
-
-data_dir_mount_file = config['configurations']['hadoop-env']['dfs.datanode.data.dir.mount.file']
-
-# HDFS High Availability properties
-dfs_ha_enabled = False
-dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
-dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-dfs_ha_automatic_failover_enabled = default("/configurations/hdfs-site/dfs.ha.automatic-failover.enabled", False)
-
-namenode_id = None
-namenode_rpc = None
-
-if dfs_ha_namenode_ids:
-  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
-  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
-  if dfs_ha_namenode_ids_array_len > 1:
-    dfs_ha_enabled = True
-if dfs_ha_enabled:
-  for nn_id in dfs_ha_namemodes_ids_list:
-    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
-    if hostname in nn_host:
-      namenode_id = nn_id
-      namenode_rpc = nn_host
-
-journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.http-address', None)
-if journalnode_address:
-  journalnode_port = journalnode_address.split(":")[1]
-  
-  
-if security_enabled:
-  _dn_principal_name = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
-  _dn_keytab = config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
-  _dn_principal_name = _dn_principal_name.replace('_HOST',hostname.lower())
-  
-  dn_kinit_cmd = format("{kinit_path_local} -kt {_dn_keytab} {_dn_principal_name};")
-  
-  _nn_principal_name = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
-  _nn_keytab = config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
-  _nn_principal_name = _nn_principal_name.replace('_HOST',hostname.lower())
-  
-  nn_kinit_cmd = format("{kinit_path_local} -kt {_nn_keytab} {_nn_principal_name};")  
-else:
-  dn_kinit_cmd = ""
-  nn_kinit_cmd = ""  
-
-import functools
-#create partial functions with common arguments for every HdfsDirectory call
-#to create hdfs directory we need to call params.HdfsDirectory in code
-HdfsDirectory = functools.partial(
-  HdfsDirectory,
-  conf_dir=hadoop_conf_dir,
-  hdfs_user=hdfs_user,
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  bin_dir = hadoop_bin_dir
-)
-
-io_compression_codecs = config['configurations']['core-site']['io.compression.codecs']
-lzo_enabled = "com.hadoop.compression.lzo" in io_compression_codecs
-# stack_is_hdp22_or_further
-underscorred_version = stack_version_unformatted.replace('.', '_')
-dashed_version = stack_version_unformatted.replace('.', '-')
-lzo_packages_to_family = {
-  "any": ["hadoop-lzo"],
-  "redhat": ["lzo", "hadoop-lzo-native"],
-  "suse": ["lzo", "hadoop-lzo-native"],
-  "ubuntu": ["liblzo2-2"]
-}
-
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
-  lzo_packages_to_family["redhat"] += [format("hadooplzo_{underscorred_version}_*")]
-  lzo_packages_to_family["suse"] += [format("hadooplzo_{underscorred_version}_*")]
-  lzo_packages_to_family["ubuntu"] += [format("hadooplzo_{dashed_version}_*")]
-
-lzo_packages_for_current_host = lzo_packages_to_family['any'] + lzo_packages_to_family[System.get_instance().os_family]
-all_lzo_packages = set(itertools.chain(*lzo_packages_to_family.values()))
- 
-exclude_packages = []
-if not lzo_enabled:
-  exclude_packages += all_lzo_packages
-  
-name_node_params = default("/commandParams/namenode", None)
-
-#hadoop params
-hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
-
-#hadoop-env.sh
-java_home = config['hostLevelParams']['java_home']
-
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.0') >= 0 and compare_versions(hdp_stack_version, '2.1') < 0 and System.get_instance().os_family != "suse":
-  # deprecated rhel jsvc_path
-  jsvc_path = "/usr/libexec/bigtop-utils"
-else:
-  jsvc_path = "/usr/lib/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")

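The HA block near the end of params.py above determines which NameNode id belongs to the current host by scanning the dfs.namenode.rpc-address.<nameservice>.<nn_id> properties. A simplified sketch of that lookup, with hypothetical hostnames and a plain dict standing in for the hdfs-site configuration:

    # Hypothetical values standing in for config['configurations']['hdfs-site']
    hdfs_site = {
        'dfs.namenode.rpc-address.mycluster.nn1': 'master1.example.com:8020',
        'dfs.namenode.rpc-address.mycluster.nn2': 'master2.example.com:8020',
    }
    hostname = 'master2.example.com'
    nameservice = 'mycluster'

    namenode_id = None
    for nn_id in 'nn1,nn2'.split(','):
        nn_host = hdfs_site['dfs.namenode.rpc-address.%s.%s' % (nameservice, nn_id)]
        if hostname in nn_host:  # substring match, as in params.py
            namenode_id = nn_id

    print(namenode_id)  # nn2
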
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py
deleted file mode 100644
index 3dc3a1b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py
+++ /dev/null
@@ -1,119 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-class HdfsServiceCheck(Script):
-  def service_check(self, env):
-    import params
-
-    env.set_params(params)
-    unique = functions.get_unique_id_and_date()
-    dir = '/tmp'
-    tmp_file = format("{dir}/{unique}")
-
-    safemode_command = "dfsadmin -safemode get | grep OFF"
-
-    create_dir_cmd = format("fs -mkdir {dir}")
-    chmod_command = format("fs -chmod 777 {dir}")
-    test_dir_exists = as_user(format("{hadoop_bin_dir}/hadoop --config {hadoop_conf_dir} fs -test -e {dir}"), params.smoke_user)
-    cleanup_cmd = format("fs -rm {tmp_file}")
-    #cleanup is put below to handle retries; if retrying, there will be a stale file
-    #that needs cleanup; the exit code is a function of the second command
-    create_file_cmd = format(
-      "{cleanup_cmd}; hadoop --config {hadoop_conf_dir} fs -put /etc/passwd {tmp_file}")
-    test_cmd = format("fs -test -e {tmp_file}")
-    if params.security_enabled:
-      Execute(format("{kinit_path_local} -kt {smoke_user_keytab} {smoke_user}"),
-        user=params.smoke_user
-      )
-    ExecuteHadoop(safemode_command,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=20,
-                  bin_dir=params.hadoop_bin_dir
-    )
-    ExecuteHadoop(create_dir_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  not_if=test_dir_exists,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5,
-                  bin_dir=params.hadoop_bin_dir
-    )
-    ExecuteHadoop(chmod_command,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5,
-                  bin_dir=params.hadoop_bin_dir
-    )
-    ExecuteHadoop(create_file_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5,
-                  bin_dir=params.hadoop_bin_dir
-    )
-    ExecuteHadoop(test_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5,
-                  bin_dir=params.hadoop_bin_dir
-    )
-    if params.has_journalnode_hosts:
-      journalnode_port = params.journalnode_port
-      checkWebUIFileName = "checkWebUI.py"
-      checkWebUIFilePath = format("{tmp_dir}/{checkWebUIFileName}")
-      comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
-      checkWebUICmd = format("python {checkWebUIFilePath} -m {comma_sep_jn_hosts} -p {journalnode_port}")
-      File(checkWebUIFilePath,
-           content=StaticFile(checkWebUIFileName),
-           mode=0775)
-
-      Execute(checkWebUICmd,
-              logoutput=True,
-              try_sleep=3,
-              tries=5,
-              user=params.smoke_user
-      )
-
-    if params.is_namenode_master:
-      if params.has_zkfc_hosts:
-        pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-        pid_file = format("{pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
-        check_zkfc_process_cmd = format(
-          "ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
-        Execute(check_zkfc_process_cmd,
-                logoutput=True,
-                try_sleep=3,
-                tries=5
-        )
-
-
-if __name__ == "__main__":
-  HdfsServiceCheck().execute()

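The service check above first waits for the NameNode to leave safemode by retrying 'dfsadmin -safemode get | grep OFF' up to 20 times with a 3-second sleep; ExecuteHadoop handles the retries internally. A bare-bones sketch of the same loop with subprocess, assuming a 'hadoop' binary on PATH:

    import subprocess
    import time

    def wait_for_safemode_off(tries=20, try_sleep=3):
        # Mirrors the tries/try_sleep behavior of the ExecuteHadoop call above.
        cmd = "hadoop dfsadmin -safemode get | grep OFF"
        for _ in range(tries):
            if subprocess.call(cmd, shell=True) == 0:
                return True
            time.sleep(try_sleep)
        return False
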
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/snamenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/snamenode.py
deleted file mode 100644
index 7106422..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/snamenode.py
+++ /dev/null
@@ -1,68 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs_snamenode import snamenode
-from hdfs import hdfs
-
-
-class SNameNode(Script):
-
-  def install(self, env):
-    import params
-
-    env.set_params(params)
-
-    self.install_packages(env, params.exclude_packages)
-
-  def pre_rolling_restart(self, env):
-    # Secondary namenode is actually removed in an HA cluster, which is a prerequisite for Rolling Upgrade,
-    # so it does not need any Rolling Restart logic.
-    pass
-
-  def start(self, env, rolling_restart=False):
-    import params
-    env.set_params(params)
-
-    self.configure(env)
-    snamenode(action="start")
-
-  def stop(self, env, rolling_restart=False):
-    import params
-    env.set_params(params)
-
-    snamenode(action="stop")
-
-  def configure(self, env):
-    import params
-
-    env.set_params(params)
-    hdfs()
-    snamenode(action="configure")
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    check_process_status(status_params.snamenode_pid_file)
-
-
-if __name__ == "__main__":
-  SNameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/status_params.py
deleted file mode 100644
index 0027a4c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/status_params.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hdp_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-datanode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
-namenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
-snamenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
-journalnode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
-zkfc_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")

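Every pid-file path in status_params.py above is built from the same two-level template. A trivial expansion with hypothetical values:

    hadoop_pid_dir_prefix = '/var/run/hadoop'  # hypothetical value
    hdfs_user = 'hdfs'                         # hypothetical value

    hdp_pid_dir = '%s/%s' % (hadoop_pid_dir_prefix, hdfs_user)
    namenode_pid_file = '%s/hadoop-%s-namenode.pid' % (hdp_pid_dir, hdfs_user)
    print(namenode_pid_file)  # /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid
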
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/utils.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/utils.py
deleted file mode 100644
index 6f421b6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/utils.py
+++ /dev/null
@@ -1,230 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-import re
-
-from resource_management import *
-from resource_management.libraries.functions.format import format
-from resource_management.core.shell import call, checked_call
-from resource_management.core.exceptions import ComponentIsNotRunning
-
-from zkfc_slave import ZkfcSlave
-
-def safe_zkfc_op(action, env):
-  """
-  Idempotent operation on the zkfc process to either start or stop it.
-  :param action: start or stop
-  :param env: environment
-  """
-  zkfc = None
-  if action == "start":
-    try:
-      zkfc = ZkfcSlave()
-      zkfc.status(env)
-    except ComponentIsNotRunning:
-      if zkfc:
-        zkfc.start(env)
-
-  if action == "stop":
-    try:
-      zkfc = ZkfcSlave()
-      zkfc.status(env)
-    except ComponentIsNotRunning:
-      pass
-    else:
-      if zkfc:
-        zkfc.stop(env)
-
-
-def failover_namenode():
-  """
-  Failover the primary namenode by killing zkfc if it exists on this host (assuming this host is the primary).
-  """
-  import params
-  check_service_cmd = format("hdfs haadmin -getServiceState {namenode_id}")
-  code, out = call(check_service_cmd, verbose=True, logoutput=True, user=params.hdfs_user)
-
-  state = "unknown"
-  if code == 0 and out:
-    state = "active" if "active" in out else ("standby" if "standby" in out else state)
-    Logger.info("Namenode service state: %s" % state)
-
-  if state == "active":
-    Logger.info("Rolling Upgrade - Initiating namenode failover by killing zkfc on active namenode")
-
-    # Forcefully kill ZKFC on this host to initiate a failover
-    kill_zkfc(params.hdfs_user)
-
-    # Wait until it transitions to standby
-    check_standby_cmd = format("hdfs haadmin -getServiceState {namenode_id} | grep standby")
-    Execute(check_standby_cmd,
-            user=params.hdfs_user,
-            tries=30,
-            try_sleep=6,
-            logoutput=True)
-  else:
-    Logger.info("Rolling Upgrade - Host %s is the standby namenode." % str(params.hostname))
-
-
-def kill_zkfc(zkfc_user):
-  """
-  There are two potential methods for failing over the namenode, especially during a Rolling Upgrade.
-  Option 1. Kill zkfc on primary namenode provided that the secondary is up and has zkfc running on it.
-  Option 2. Silent failover (not supported as of HDP 2.2.0.0)
-  :param zkfc_user: User that started the ZKFC process.
-  """
-  import params
-  if params.dfs_ha_enabled:
-    zkfc_pid_file = get_service_pid_file("zkfc", zkfc_user)
-    if zkfc_pid_file:
-      check_process = format("ls {zkfc_pid_file} > /dev/null 2>&1 && ps -p `cat {zkfc_pid_file}` > /dev/null 2>&1")
-      code, out = call(check_process, verbose=True)
-      if code == 0:
-        Logger.debug("ZKFC is running and will be killed to initiate namenode failover.")
-        kill_command = format("{check_process} && kill -9 `cat {zkfc_pid_file}` > /dev/null 2>&1")
-        checked_call(kill_command, verbose=True)
-
-
-def get_service_pid_file(name, user):
-  """
-  Get the pid file path that was used to start the service by the user.
-  :param name: Service name
-  :param user: User that started the service.
-  :return: PID file path
-  """
-  import params
-  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
-  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
-  return pid_file
-
-
-def service(action=None, name=None, user=None, options="", create_pid_dir=False,
-            create_log_dir=False):
-  """
-  :param action: Either "start" or "stop"
-  :param name: Component name, e.g., "namenode", "datanode", "secondarynamenode", "zkfc"
-  :param user: User to run the command as
-  :param options: Additional options to pass to command as a string
-  :param create_pid_dir: Create PID directory
-  :param create_log_dir: Create log file directory
-  """
-  import params
-
-  options = options if options else ""
-  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
-  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
-  log_dir = format("{hdfs_log_dir_prefix}/{user}")
-  check_process = format(
-    "ls {pid_file} >/dev/null 2>&1 &&"
-    " ps -p `cat {pid_file}` >/dev/null 2>&1")
-
-  if create_pid_dir:
-    Directory(pid_dir,
-              owner=user,
-              recursive=True)
-  if create_log_dir:
-    Directory(log_dir,
-              owner=user,
-              recursive=True)
-
-  hadoop_env_exports = {
-    'HADOOP_LIBEXEC_DIR': params.hadoop_libexec_dir
-  }
-
-  if params.security_enabled and name == "datanode":
-    ## The directory where pid files are stored in the secure data environment.
-    hadoop_secure_dn_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-    hadoop_secure_dn_pid_file = format("{hadoop_secure_dn_pid_dir}/hadoop_secure_dn.pid")
-
-    # From the Champlain (HDP 2.2) stack onward, the datanode may be started as a non-root user even in a secure cluster
-    if not (params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0) or params.secure_dn_ports_are_in_use:
-      user = "root"
-      pid_file = format(
-        "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
-
-    if action == 'stop' and (params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0) and \
-      os.path.isfile(hadoop_secure_dn_pid_file):
-        # Special handling is needed for the case where a non-root
-        # secure datanode is reconfigured and then restarted to pick
-        # up new configs; without it we would not be able to stop
-        # the already-running instance
-        user = "root"
-        
-        try:
-          check_process_status(hadoop_secure_dn_pid_file)
-          
-          custom_export = {
-            'HADOOP_SECURE_DN_USER': params.hdfs_user
-          }
-          hadoop_env_exports.update(custom_export)
-          
-        except ComponentIsNotRunning:
-          pass
-
-  hadoop_daemon = format("{hadoop_bin}/hadoop-daemon.sh")
-
-  if user == "root":
-    cmd = [hadoop_daemon, "--config", params.hadoop_conf_dir, action, name]
-    if options:
-      cmd += [options, ]
-    daemon_cmd = as_sudo(cmd)
-  else:
-    cmd = format("{ulimit_cmd} {hadoop_daemon} --config {hadoop_conf_dir} {action} {name}")
-    if options:
-      cmd += " " + options
-    daemon_cmd = as_user(cmd, user)
-     
-  service_is_up = check_process if action == "start" else None
-  #remove pid file from dead process
-  File(pid_file,
-       action="delete",
-       not_if=check_process
-  )
-  Execute(daemon_cmd,
-          not_if=service_is_up,
-          environment=hadoop_env_exports
-  )
-
-  if action == "stop":
-    File(pid_file,
-         action="delete",
-    )
-
-
-def get_port(address):
-  """
-  Extracts the port from an address like 0.0.0.0:1019
-  """
-  if address is None:
-    return None
-  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
-  if m is not None:
-    return int(m.group(2))
-  else:
-    return None
-
-
-def is_secure_port(port):
-  """
-  Returns True if the port is root-owned on *nix systems
-  """
-  if port is not None:
-    return port < 1024
-  else:
-    return False

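get_port() and is_secure_port() above drive the secure-datanode decision in params.py: a datanode bound to a port below 1024 must be started as root. A quick standalone check of their behavior, copying the same regex:

    import re

    def get_port(address):
        if address is None:
            return None
        m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
        return int(m.group(2)) if m else None

    def is_secure_port(port):
        # ports below 1024 are root-owned on *nix
        return port is not None and port < 1024

    print(get_port('0.0.0.0:1019'))                   # 1019
    print(is_secure_port(get_port('0.0.0.0:1019')))   # True
    print(get_port('https://nn.example.com:50470'))   # 50470
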
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/zkfc_slave.py
deleted file mode 100644
index ee8b418..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/zkfc_slave.py
+++ /dev/null
@@ -1,71 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.libraries.functions.check_process_status import check_process_status
-
-import utils  # this is needed to avoid a circular dependency since utils.py calls this class
-from hdfs import hdfs
-
-
-class ZkfcSlave(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env, params.exclude_packages)
-    env.set_params(params)
-
-  def start(self, env, rolling_restart=False):
-    import params
-
-    env.set_params(params)
-    self.configure(env)
-    Directory(params.hadoop_pid_dir_prefix,
-              mode=0755,
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-    utils.service(
-      action="start", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
-      create_log_dir=True
-    )
-
-  def stop(self, env, rolling_restart=False):
-    import params
-
-    env.set_params(params)
-    utils.service(
-      action="stop", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
-      create_log_dir=True
-    )
-
-  def configure(self, env):
-    hdfs()
-    pass
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    check_process_status(status_params.zkfc_pid_file)
-
-
-if __name__ == "__main__":
-  ZkfcSlave().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/exclude_hosts_list.j2
deleted file mode 100644
index a92cdc1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in hdfs_exclude_file %}
-{{host}}
-{% endfor %}
\ No newline at end of file

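exclude_hosts_list.j2 above renders to one decommissioned host per line. A small sketch of the rendered output using the jinja2 package directly, with hypothetical hostnames:

    from jinja2 import Template

    # Same loop as the template body, minus the license header
    tmpl = Template("{% for host in hdfs_exclude_file %}\n{{host}}\n{% endfor %}")
    print(tmpl.render(hdfs_exclude_file=['dn1.example.com', 'dn2.example.com']))
    # each host appears on its own line (with blank separators,
    # since trim_blocks is off by default)
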
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/hdfs.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/hdfs.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/hdfs.conf.j2
deleted file mode 100644
index d58a6f5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/hdfs.conf.j2
+++ /dev/null
@@ -1,35 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-{{hdfs_user}}   - nofile 32768
-{{hdfs_user}}   - nproc  65536

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/slaves.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/slaves.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/slaves.j2
deleted file mode 100644
index 4a9e713..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/templates/slaves.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in slave_hosts %}
-{{host}}
-{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/alerts.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/alerts.json
deleted file mode 100644
index 5344414..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/alerts.json
+++ /dev/null
@@ -1,58 +0,0 @@
-{
-  "ZOOKEEPER": {
-    "service": [
-      {
-        "name": "zookeeper_server_process_percent",
-        "label": "Percent ZooKeeper Servers Available",
-        "description": "This alert is triggered if the number of down ZooKeeper servers in the cluster is greater than the configured critical threshold. It aggregates the results of ZooKeeper process checks.",
-        "interval": 1,
-        "scope": "SERVICE",
-        "enabled": true,
-        "source": {
-          "type": "AGGREGATE",
-          "alert_name": "zookeeper_server_process",
-          "reporting": {
-            "ok": {
-              "text": "affected: [{1}], total: [{0}]"
-            },
-            "warning": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 0.35
-            },
-            "critical": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 0.70
-            }
-          }
-        }
-      }  
-    ],
-    "ZOOKEEPER_SERVER": [
-      {
-        "name": "zookeeper_server_process",
-        "label": "ZooKeeper Server Process",
-        "description": "This host-level alert is triggered if the ZooKeeper server process cannot be determined to be up and listening on the network.",
-        "interval": 1,
-        "scope": "ANY",
-        "source": {
-          "type": "PORT",
-          "uri": "{{zookeeper-env/clientPort}}",
-          "default_port": 2181,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      }
-    ]
-  }
-}
\ No newline at end of file

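The AGGREGATE alert above classifies the service by comparing the fraction of affected ZooKeeper servers against the warning (0.35) and critical (0.70) values. A small sketch of that classification, assuming the ratio is affected/total as the '{1}'/'{0}' reporting text suggests:

    def classify(affected, total, warning=0.35, critical=0.70):
        ratio = float(affected) / total
        if ratio >= critical:
            return 'CRITICAL'
        if ratio >= warning:
            return 'WARNING'
        return 'OK'

    print(classify(1, 5))  # OK       (0.20)
    print(classify(2, 5))  # WARNING  (0.40)
    print(classify(4, 5))  # CRITICAL (0.80)
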

[35/37] ambari git commit: AMBARI-8746: Common Services: Refactor HDP 2.0.6 HIVE, OOZIE services (Jayush Luniya)

Posted by jl...@apache.org.
AMBARI-8746: Common Services: Refactor HDP 2.0.6 HIVE, OOZIE services (Jayush Luniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/46e9bcb8
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/46e9bcb8
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/46e9bcb8

Branch: refs/heads/trunk
Commit: 46e9bcb845ee32186744536b3b77b735c4363631
Parents: b4cd4cb
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Wed Dec 17 11:59:50 2014 -0800
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Wed Dec 17 11:59:50 2014 -0800

----------------------------------------------------------------------
 .../common-services/HIVE/0.12.0.2.0/alerts.json |   60 +
 .../HIVE/0.12.0.2.0/configuration/hcat-env.xml  |   57 +
 .../HIVE/0.12.0.2.0/configuration/hive-env.xml  |  134 ++
 .../configuration/hive-exec-log4j.xml           |  111 ++
 .../0.12.0.2.0/configuration/hive-log4j.xml     |  120 ++
 .../HIVE/0.12.0.2.0/configuration/hive-site.xml |  329 ++++
 .../0.12.0.2.0/configuration/webhcat-env.xml    |   54 +
 .../0.12.0.2.0/configuration/webhcat-site.xml   |  135 ++
 .../0.12.0.2.0/etc/hive-schema-0.12.0.mysql.sql |  777 ++++++++++
 .../etc/hive-schema-0.12.0.oracle.sql           |  718 +++++++++
 .../etc/hive-schema-0.12.0.postgres.sql         | 1406 ++++++++++++++++++
 .../HIVE/0.12.0.2.0/metainfo.xml                |  294 ++++
 .../HIVE/0.12.0.2.0/package/alerts.json         |   60 +
 .../package/configuration/hcat-env.xml          |   57 +
 .../package/configuration/hive-env.xml          |  134 ++
 .../package/configuration/hive-exec-log4j.xml   |  111 ++
 .../package/configuration/hive-log4j.xml        |  120 ++
 .../package/configuration/hive-site.xml         |  329 ++++
 .../package/configuration/webhcat-env.xml       |   54 +
 .../package/configuration/webhcat-site.xml      |  135 ++
 .../package/etc/hive-schema-0.12.0.mysql.sql    |  777 ++++++++++
 .../package/etc/hive-schema-0.12.0.oracle.sql   |  718 +++++++++
 .../package/etc/hive-schema-0.12.0.postgres.sql | 1406 ++++++++++++++++++
 .../0.12.0.2.0/package/files/addMysqlUser.sh    |   34 +
 .../package/files/alert_hive_thrift_port.py     |  127 ++
 .../package/files/alert_webhcat_server.py       |  111 ++
 .../HIVE/0.12.0.2.0/package/files/hcatSmoke.sh  |   36 +
 .../HIVE/0.12.0.2.0/package/files/hiveSmoke.sh  |   24 +
 .../0.12.0.2.0/package/files/hiveserver2.sql    |   23 +
 .../package/files/hiveserver2Smoke.sh           |   32 +
 .../HIVE/0.12.0.2.0/package/files/pigSmoke.sh   |   18 +
 .../0.12.0.2.0/package/files/removeMysqlUser.sh |   33 +
 .../0.12.0.2.0/package/files/startMetastore.sh  |   23 +
 .../0.12.0.2.0/package/files/templetonSmoke.sh  |   96 ++
 .../HIVE/0.12.0.2.0/package/metainfo.xml        |  294 ++++
 .../HIVE/0.12.0.2.0/package/scripts/__init__.py |   19 +
 .../HIVE/0.12.0.2.0/package/scripts/hcat.py     |   58 +
 .../0.12.0.2.0/package/scripts/hcat_client.py   |   43 +
 .../package/scripts/hcat_service_check.py       |   80 +
 .../HIVE/0.12.0.2.0/package/scripts/hive.py     |  232 +++
 .../0.12.0.2.0/package/scripts/hive_client.py   |   42 +
 .../package/scripts/hive_metastore.py           |   64 +
 .../0.12.0.2.0/package/scripts/hive_server.py   |   70 +
 .../0.12.0.2.0/package/scripts/hive_service.py  |  115 ++
 .../0.12.0.2.0/package/scripts/install_jars.py  |  107 ++
 .../0.12.0.2.0/package/scripts/mysql_server.py  |   60 +
 .../0.12.0.2.0/package/scripts/mysql_service.py |   52 +
 .../0.12.0.2.0/package/scripts/mysql_users.py   |   65 +
 .../HIVE/0.12.0.2.0/package/scripts/params.py   |  282 ++++
 .../0.12.0.2.0/package/scripts/service_check.py |   53 +
 .../0.12.0.2.0/package/scripts/status_params.py |   36 +
 .../HIVE/0.12.0.2.0/package/scripts/webhcat.py  |  143 ++
 .../package/scripts/webhcat_server.py           |   53 +
 .../package/scripts/webhcat_service.py          |   42 +
 .../package/scripts/webhcat_service_check.py    |   41 +
 .../package/templates/startHiveserver2.sh.j2    |   24 +
 .../common-services/OOZIE/4.0.0.2.0/alerts.json |   42 +
 .../OOZIE/4.0.0.2.0/configuration/oozie-env.xml |  139 ++
 .../4.0.0.2.0/configuration/oozie-log4j.xml     |   97 ++
 .../4.0.0.2.0/configuration/oozie-site.xml      |  312 ++++
 .../OOZIE/4.0.0.2.0/metainfo.xml                |  165 ++
 .../package/files/alert_check_oozie_server.py   |   74 +
 .../4.0.0.2.0/package/files/oozieSmoke2.sh      |  114 ++
 .../4.0.0.2.0/package/files/wrap_ooziedb.sh     |   31 +
 .../OOZIE/4.0.0.2.0/package/scripts/oozie.py    |  206 +++
 .../4.0.0.2.0/package/scripts/oozie_client.py   |   43 +
 .../4.0.0.2.0/package/scripts/oozie_server.py   |   56 +
 .../4.0.0.2.0/package/scripts/oozie_service.py  |   80 +
 .../OOZIE/4.0.0.2.0/package/scripts/params.py   |  170 +++
 .../4.0.0.2.0/package/scripts/service_check.py  |   60 +
 .../4.0.0.2.0/package/scripts/status_params.py  |   26 +
 .../package/templates/adminusers.txt.j2         |   24 +
 .../package/templates/oozie-log4j.properties.j2 |   92 ++
 .../stacks/HDP/2.0.6/services/HIVE/alerts.json  |   60 -
 .../services/HIVE/configuration/hcat-env.xml    |   57 -
 .../services/HIVE/configuration/hive-env.xml    |  134 --
 .../HIVE/configuration/hive-exec-log4j.xml      |  111 --
 .../services/HIVE/configuration/hive-log4j.xml  |  120 --
 .../services/HIVE/configuration/hive-site.xml   |  329 ----
 .../services/HIVE/configuration/webhcat-env.xml |   54 -
 .../HIVE/configuration/webhcat-site.xml         |  135 --
 .../HIVE/etc/hive-schema-0.12.0.mysql.sql       |  777 ----------
 .../HIVE/etc/hive-schema-0.12.0.oracle.sql      |  718 ---------
 .../HIVE/etc/hive-schema-0.12.0.postgres.sql    | 1406 ------------------
 .../stacks/HDP/2.0.6/services/HIVE/metainfo.xml |  270 +---
 .../services/HIVE/package/files/addMysqlUser.sh |   34 -
 .../package/files/alert_hive_thrift_port.py     |  127 --
 .../HIVE/package/files/alert_webhcat_server.py  |  111 --
 .../services/HIVE/package/files/hcatSmoke.sh    |   36 -
 .../services/HIVE/package/files/hiveSmoke.sh    |   24 -
 .../services/HIVE/package/files/hiveserver2.sql |   23 -
 .../HIVE/package/files/hiveserver2Smoke.sh      |   32 -
 .../services/HIVE/package/files/pigSmoke.sh     |   18 -
 .../HIVE/package/files/removeMysqlUser.sh       |   33 -
 .../HIVE/package/files/startMetastore.sh        |   23 -
 .../HIVE/package/files/templetonSmoke.sh        |   96 --
 .../services/HIVE/package/scripts/__init__.py   |   19 -
 .../2.0.6/services/HIVE/package/scripts/hcat.py |   58 -
 .../HIVE/package/scripts/hcat_client.py         |   43 -
 .../HIVE/package/scripts/hcat_service_check.py  |   80 -
 .../2.0.6/services/HIVE/package/scripts/hive.py |  232 ---
 .../HIVE/package/scripts/hive_client.py         |   42 -
 .../HIVE/package/scripts/hive_metastore.py      |   64 -
 .../HIVE/package/scripts/hive_server.py         |   70 -
 .../HIVE/package/scripts/hive_service.py        |  115 --
 .../HIVE/package/scripts/install_jars.py        |  107 --
 .../HIVE/package/scripts/mysql_server.py        |   60 -
 .../HIVE/package/scripts/mysql_service.py       |   52 -
 .../HIVE/package/scripts/mysql_users.py         |   65 -
 .../services/HIVE/package/scripts/params.py     |  282 ----
 .../HIVE/package/scripts/service_check.py       |   53 -
 .../HIVE/package/scripts/status_params.py       |   36 -
 .../services/HIVE/package/scripts/webhcat.py    |  143 --
 .../HIVE/package/scripts/webhcat_server.py      |   53 -
 .../HIVE/package/scripts/webhcat_service.py     |   42 -
 .../package/scripts/webhcat_service_check.py    |   41 -
 .../package/templates/startHiveserver2.sh.j2    |   24 -
 .../stacks/HDP/2.0.6/services/OOZIE/alerts.json |   42 -
 .../services/OOZIE/configuration/oozie-env.xml  |  139 --
 .../OOZIE/configuration/oozie-log4j.xml         |   97 --
 .../services/OOZIE/configuration/oozie-site.xml |  312 ----
 .../HDP/2.0.6/services/OOZIE/metainfo.xml       |  141 +-
 .../package/files/alert_check_oozie_server.py   |   74 -
 .../services/OOZIE/package/files/oozieSmoke2.sh |  114 --
 .../OOZIE/package/files/wrap_ooziedb.sh         |   31 -
 .../services/OOZIE/package/scripts/oozie.py     |  206 ---
 .../OOZIE/package/scripts/oozie_client.py       |   43 -
 .../OOZIE/package/scripts/oozie_server.py       |   56 -
 .../OOZIE/package/scripts/oozie_service.py      |   80 -
 .../services/OOZIE/package/scripts/params.py    |  170 ---
 .../OOZIE/package/scripts/service_check.py      |   60 -
 .../OOZIE/package/scripts/status_params.py      |   26 -
 .../OOZIE/package/templates/adminusers.txt.j2   |   24 -
 .../package/templates/oozie-log4j.properties.j2 |   92 --
 .../stacks/2.0.6/HIVE/test_hcat_client.py       |   14 +-
 .../stacks/2.0.6/HIVE/test_hive_client.py       |   14 +-
 .../stacks/2.0.6/HIVE/test_hive_metastore.py    |   38 +-
 .../stacks/2.0.6/HIVE/test_hive_server.py       |   44 +-
 .../2.0.6/HIVE/test_hive_service_check.py       |   14 +-
 .../stacks/2.0.6/HIVE/test_mysql_server.py      |   50 +-
 .../stacks/2.0.6/HIVE/test_webhcat_server.py    |   38 +-
 .../stacks/2.0.6/OOZIE/test_oozie_client.py     |   20 +-
 .../stacks/2.0.6/OOZIE/test_oozie_server.py     |   45 +-
 .../stacks/2.0.6/OOZIE/test_service_check.py    |   15 +-
 .../stacks/2.1/HIVE/test_hive_metastore.py      |   40 +-
 145 files changed, 12590 insertions(+), 8217 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/alerts.json b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/alerts.json
new file mode 100644
index 0000000..3762492
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/alerts.json
@@ -0,0 +1,60 @@
+{
+  "HIVE": {
+    "service": [],
+    "HIVE_METASTORE": [
+      {
+        "name": "hive_metastore_process",
+        "label": "Hive Metastore Process",
+        "description": "This host-level alert is triggered if the Hive Metastore process cannot be determined to be up and listening on the network.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "PORT",
+          "uri": "{{hive-site/hive.metastore.uris}}",
+          "default_port": 9083,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ],
+    "HIVE_SERVER": [
+      {
+        "name": "hive_server_process",
+        "label": "HiveServer2 Process",
+        "description": "This host-level alert is triggered if the HiveServer cannot be determined to be up and responding to client requests.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HDP/2.0.6/services/HIVE/package/files/alert_hive_thrift_port.py"
+        }
+      }
+    ],
+    "WEBHCAT_SERVER": [
+      {
+        "name": "hive_webhcat_server_status",
+        "label": "WebHCat Server Status",
+        "description": "This host-level alert is triggered if the templeton server status is not healthy.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HDP/2.0.6/services/HIVE/package/files/alert_webhcat_server.py"
+        }
+      }    
+    ]
+  }
+}
\ No newline at end of file
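
A note on the PORT-type alert above: the agent resolves the host from
hive-site's hive.metastore.uris (falling back to default_port 9083), times a
plain TCP connect, and grades the result against the warning/critical
thresholds. A minimal sketch of that logic, with illustrative names rather
than the Ambari agent's actual API:

import socket
import time

def check_port(host, port=9083, warn=1.5, crit=5.0):
    # Time a bare TCP connect, mirroring the PORT alert source above.
    start = time.time()
    try:
        sock = socket.create_connection((host, port), timeout=crit)
        sock.close()
    except socket.error as e:
        return ('CRITICAL', 'Connection failed: %s to %s:%s' % (e, host, port))
    elapsed = time.time() - start
    state = 'WARNING' if elapsed >= warn else 'OK'
    return (state, 'TCP OK - %.3fs response on port %s' % (elapsed, port))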

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hcat-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hcat-env.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hcat-env.xml
new file mode 100644
index 0000000..91b402b
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hcat-env.xml
@@ -0,0 +1,57 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- hcat-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for the hcat-env.sh file</description>
+    <value>
+      # Licensed to the Apache Software Foundation (ASF) under one
+      # or more contributor license agreements. See the NOTICE file
+      # distributed with this work for additional information
+      # regarding copyright ownership. The ASF licenses this file
+      # to you under the Apache License, Version 2.0 (the
+      # "License"); you may not use this file except in compliance
+      # with the License. You may obtain a copy of the License at
+      #
+      # http://www.apache.org/licenses/LICENSE-2.0
+      #
+      # Unless required by applicable law or agreed to in writing, software
+      # distributed under the License is distributed on an "AS IS" BASIS,
+      # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+      # See the License for the specific language governing permissions and
+      # limitations under the License.
+
+      JAVA_HOME={{java64_home}}
+      HCAT_PID_DIR={{hcat_pid_dir}}/
+      HCAT_LOG_DIR={{hcat_log_dir}}/
+      HCAT_CONF_DIR={{hcat_conf_dir}}
+      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+      #DBROOT is the path where the connector jars are downloaded
+      DBROOT={{hcat_dbroot}}
+      USER={{hcat_user}}
+      METASTORE_PORT={{hive_metastore_port}}
+    </value>
+  </property>
+  
+</configuration>
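
The content property above is a Jinja template: at deploy time Ambari renders
it against the service's params (hcat_pid_dir, hcat_log_dir, and so on) and
writes the result out as hcat-env.sh. A rough sketch of that substitution
using jinja2 directly, with hypothetical values standing in for params.py:

from jinja2 import Template

env_template = (
    "JAVA_HOME={{java64_home}}\n"
    "HCAT_PID_DIR={{hcat_pid_dir}}/\n"
    "HCAT_LOG_DIR={{hcat_log_dir}}/\n"
)
rendered = Template(env_template).render(
    java64_home='/usr/jdk64/jdk1.7.0_67',   # illustrative path
    hcat_pid_dir='/var/run/webhcat',
    hcat_log_dir='/var/log/webhcat',
)
print(rendered)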

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml
new file mode 100644
index 0000000..52cfa10
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-env.xml
@@ -0,0 +1,134 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hive_database_type</name>
+    <value>mysql</value>
+    <description>Default HIVE DB type.</description>
+  </property>
+  <property>
+    <name>hive_database</name>
+    <value>New MySQL Database</value>
+    <description>
+      Property that determines whether the HIVE DB is managed by Ambari.
+    </description>
+  </property>
+  <property>
+    <name>hive_ambari_database</name>
+    <value>MySQL</value>
+    <description>Database type.</description>
+  </property>
+  <property>
+    <name>hive_database_name</name>
+    <value>hive</value>
+    <description>Database name.</description>
+  </property>
+  <property>
+    <name>hive_dbroot</name>
+    <value>/usr/lib/hive/lib/</value>
+    <description>Hive DB Directory.</description>
+  </property>
+  <property>
+    <name>hive_log_dir</name>
+    <value>/var/log/hive</value>
+    <description>Directory for Hive Log files.</description>
+  </property>
+  <property>
+    <name>hive_pid_dir</name>
+    <value>/var/run/hive</value>
+    <description>Hive PID Dir.</description>
+  </property>
+  <property>
+    <name>hive_user</name>
+    <value>hive</value>
+    <property-type>USER</property-type>
+    <description>Hive User.</description>
+  </property>
+
+  <!--HCAT-->
+
+  <property>
+    <name>hcat_log_dir</name>
+    <value>/var/log/webhcat</value>
+    <description>WebHCat Log Dir.</description>
+  </property>
+  <property>
+    <name>hcat_pid_dir</name>
+    <value>/var/run/webhcat</value>
+    <description>WebHCat Pid Dir.</description>
+  </property>
+  <property>
+    <name>hcat_user</name>
+    <value>hcat</value>
+    <property-type>USER</property-type>
+    <description>HCat User.</description>
+  </property>
+  <property>
+    <name>webhcat_user</name>
+    <value>hcat</value>
+    <property-type>USER</property-type>
+    <description>WebHCat User.</description>
+  </property>
+  
+  <!-- hive-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for the hive-env.sh file</description>
+    <value>
+ if [ "$SERVICE" = "cli" ]; then
+   if [ -z "$DEBUG" ]; then
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+   else
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+   fi
+ fi
+
+# The heap size of the JVM started by the hive shell script can be controlled via:
+
+export HADOOP_HEAPSIZE="{{hive_heapsize}}"
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+
+# A larger heap size may be required when running queries over a large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB).  A larger heap size would also be
+# appropriate for hive server (hwi etc).
+
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR={{hive_config_dir}}
+
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
+  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
+elif [ -d "/usr/lib/hive-hcatalog/" ]; then
+  export HIVE_AUX_JARS_PATH=/usr/lib/hive-hcatalog/share/hcatalog/hive-hcatalog-core-*.jar
+else
+  export HIVE_AUX_JARS_PATH=/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar
+fi
+export METASTORE_PORT={{hive_metastore_port}}
+    </value>
+  </property>
+  
+</configuration>
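
The HIVE_AUX_JARS_PATH block in the template above is a three-way fallback:
keep the caller's value if one is set, otherwise prefer the hive-hcatalog
core jar, otherwise fall back to the legacy hcatalog jar. The same decision
sketched in Python for clarity (the shell version leaves its glob pattern to
be expanded later; this sketch expands it eagerly):

import glob
import os

def aux_jars_path(current=None):
    if current:
        # Respect an explicitly exported HIVE_AUX_JARS_PATH.
        return current
    if os.path.isdir('/usr/lib/hive-hcatalog/'):
        jars = glob.glob(
            '/usr/lib/hive-hcatalog/share/hcatalog/hive-hcatalog-core-*.jar')
        if jars:
            return jars[0]
    return '/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar'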

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-exec-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-exec-log4j.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-exec-log4j.xml
new file mode 100644
index 0000000..fb852f7
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-exec-log4j.xml
@@ -0,0 +1,111 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom hive-exec-log4j</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+
+hive.log.threshold=ALL
+hive.root.logger=INFO,FA
+hive.log.dir=${java.io.tmpdir}/${user.name}
+hive.query.id=hadoop
+hive.log.file=${hive.query.id}.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hive.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshhold=${hive.log.threshold}
+
+#
+# File Appender
+#
+
+log4j.appender.FA=org.apache.log4j.FileAppender
+log4j.appender.FA.File=${hive.log.dir}/${hive.log.file}
+log4j.appender.FA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+log4j.appender.FA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#custom logging levels
+#log4j.logger.xxx=DEBUG
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter
+
+
+log4j.category.DataNucleus=ERROR,FA
+log4j.category.Datastore=ERROR,FA
+log4j.category.Datastore.Schema=ERROR,FA
+log4j.category.JPOX.Datastore=ERROR,FA
+log4j.category.JPOX.Plugin=ERROR,FA
+log4j.category.JPOX.MetaData=ERROR,FA
+log4j.category.JPOX.Query=ERROR,FA
+log4j.category.JPOX.General=ERROR,FA
+log4j.category.JPOX.Enhancer=ERROR,FA
+
+
+# Silence useless ZK logs
+log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,FA
+log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,FA
+
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-log4j.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-log4j.xml
new file mode 100644
index 0000000..a978ef7
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-log4j.xml
@@ -0,0 +1,120 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hive.log.threshold=ALL
+hive.root.logger=INFO,DRFA
+hive.log.dir=${java.io.tmpdir}/${user.name}
+hive.log.file=hive.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hive.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=${hive.log.threshold}
+
+#
+# Daily Rolling File Appender
+#
+# Use the PidDailyRollingFileAppender class instead if you want to use separate log files
+# for different CLI sessions.
+#
+# log4j.appender.DRFA=org.apache.hadoop.hive.ql.log.PidDailyRollingFileAppender
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+
+log4j.appender.DRFA.File=${hive.log.dir}/${hive.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n
+log4j.appender.console.encoding=UTF-8
+
+#custom logging levels
+#log4j.logger.xxx=DEBUG
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter
+
+
+log4j.category.DataNucleus=ERROR,DRFA
+log4j.category.Datastore=ERROR,DRFA
+log4j.category.Datastore.Schema=ERROR,DRFA
+log4j.category.JPOX.Datastore=ERROR,DRFA
+log4j.category.JPOX.Plugin=ERROR,DRFA
+log4j.category.JPOX.MetaData=ERROR,DRFA
+log4j.category.JPOX.Query=ERROR,DRFA
+log4j.category.JPOX.General=ERROR,DRFA
+log4j.category.JPOX.Enhancer=ERROR,DRFA
+
+
+# Silence useless ZK logs
+log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,DRFA
+log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,DRFA
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
new file mode 100644
index 0000000..67be680
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/hive-site.xml
@@ -0,0 +1,329 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<configuration supports_final="true">
+
+  <property>
+    <name>hive.heapsize</name>
+    <value>1024</value>
+    <description>Hive Java heap size</description>
+  </property>
+
+  <property>
+    <name>ambari.hive.db.schema.name</name>
+    <value>hive</value>
+    <description>Database name used as the Hive Metastore</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionURL</name>
+    <value>jdbc:mysql://localhost/hive?createDatabaseIfNotExist=true</value>
+    <description>JDBC connect string for a JDBC metastore</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionDriverName</name>
+    <value>com.mysql.jdbc.Driver</value>
+    <description>Driver class name for a JDBC metastore</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionUserName</name>
+    <value>hive</value>
+    <description>username to use against metastore database</description>
+  </property>
+
+  <property require-input="true">
+    <name>javax.jdo.option.ConnectionPassword</name>
+    <value> </value>
+    <property-type>PASSWORD</property-type>
+    <description>password to use against metastore database</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.warehouse.dir</name>
+    <value>/apps/hive/warehouse</value>
+    <description>location of default database for the warehouse</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.sasl.enabled</name>
+    <value>false</value>
+    <description>If true, the metastore thrift interface will be secured with SASL.
+     Clients must authenticate with Kerberos.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.cache.pinobjtypes</name>
+    <value>Table,Database,Type,FieldSchema,Order</value>
+    <description>List of comma separated metastore object types that should be pinned in the cache</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.uris</name>
+    <value>thrift://localhost:9083</value>
+    <description>URI for client to contact metastore server</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.pre.event.listeners</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener</value>
+    <description>Pre-event listener classes to be loaded on the metastore side to run code
+      whenever databases, tables, and partitions are created, altered, or dropped.
+      Set to org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener
+      if metastore-side authorization is desired.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.client.socket.timeout</name>
+    <value>60</value>
+    <description>MetaStore Client socket timeout in seconds</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.execute.setugi</name>
+    <value>true</value>
+    <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
+  </property>
+
+  <property>
+    <name>hive.security.authorization.enabled</name>
+    <value>false</value>
+    <description>enable or disable the hive client authorization</description>
+  </property>
+
+  <property>
+    <name>hive.security.authorization.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
+    <description>the hive client authorization manager class name.
+    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
+  </property>
+
+  <property>
+    <name>hive.security.metastore.authorization.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
+    <description>The authorization manager class name to be used in the metastore for authorization. The user-defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.  </description>
+  </property>
+
+  <property>
+    <name>hive.security.authenticator.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
+    <description>Hive client authenticator manager class name. The user-defined authenticator class should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.  </description>
+  </property>
+
+  <property>
+    <name>hive.server2.enable.doAs</name>
+    <value>true</value>
+    <description>Impersonate the connected user. By default HiveServer2 performs the query processing as the user who
+      submitted the query. But if the parameter is set to false, the query will run as the user that the hiveserver2
+      process runs as.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.server2.enable.impersonation</name>
+    <description>Enable user impersonation for HiveServer2</description>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>hive.server2.authentication</name>
+    <description>Authentication mode, default NONE. Options are NONE, NOSASL, KERBEROS, LDAP, PAM and CUSTOM</description>
+    <value>NONE</value>
+  </property>
+
+  <property>
+    <name>fs.hdfs.impl.disable.cache</name>
+    <value>true</value>
+    <description>Disable HDFS filesystem cache.</description>
+  </property>
+
+  <property>
+    <name>fs.file.impl.disable.cache</name>
+    <value>true</value>
+    <description>Disable local filesystem cache.</description>
+  </property>
+
+  <property>
+    <name>hive.enforce.bucketing</name>
+    <value>true</value>
+    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
+  </property>
+
+  <property>
+    <name>hive.enforce.sorting</name>
+    <value>true</value>
+    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
+  </property>
+
+  <property>
+    <name>hive.map.aggr</name>
+    <value>true</value>
+    <description>Whether to use map-side aggregation in Hive Group By queries.</description>
+  </property>
+
+  <property>
+    <name>hive.optimize.bucketmapjoin</name>
+    <value>true</value>
+    <description>If the tables being joined are bucketized on the join columns, and the number of buckets in one table
+      is a multiple of the number of buckets in the other table, the buckets can be joined with each other by setting
+      this parameter as true.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
+    <value>false</value>
+    <description> If the tables being joined are sorted and bucketized on the join columns, and they have the same number
+    of buckets, a sort-merge join can be performed by setting this parameter as true.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.mapred.reduce.tasks.speculative.execution</name>
+    <value>false</value>
+    <description>Whether speculative execution for reducers should be turned on.</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join</name>
+    <value>true</value>
+    <description>Whether Hive enables the optimization of converting a common
+      join into a mapjoin based on the input file size.</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.sortmerge.join</name>
+    <value>true</value>
+    <description>Whether the join will be automatically converted to a sort-merge join if the joined tables pass
+      the criteria for a sort-merge join.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
+    <value>true</value>
+    <description>Required to enable the conversion of an SMB (Sort-Merge-Bucket) join to a map-join SMB.</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join.noconditionaltask</name>
+    <value>true</value>
+    <description>Whether Hive enables the optimization of converting a common join into a mapjoin based on the input file
+      size. If this parameter is on, and the sum of sizes for n-1 of the tables/partitions of an n-way join is smaller than the
+      specified size, the join is directly converted to a mapjoin (there is no conditional task).
+    </description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join.noconditionaltask.size</name>
+    <value>1000000000</value>
+    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
+      is on, and the sum of sizes for n-1 of the tables/partitions of an n-way join is smaller than this size, the join is directly
+      converted to a mapjoin (there is no conditional task). The default is 10MB.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.reducededuplication.min.reducer</name>
+    <value>1</value>
+    <description>Reduce deduplication merges two RSs by moving the key/parts/reducer-num of the child RS to the parent RS.
+      That means if the reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can produce a very slow, single MR job.
+      The optimization will be disabled if the number of reducers is less than the specified value.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.mapjoin.mapreduce</name>
+    <value>true</value>
+    <description>If hive.auto.convert.join is off, this parameter does not take
+      effect. If it is on, and if there are map-join jobs followed by a map-reduce
+      job (e.g. a group by), each map-only job is merged with the following
+      map-reduce job.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.mapjoin.bucket.cache.size</name>
+    <value>10000</value>
+    <description>
+      Size per reducer. The default is 1G, i.e. if the input size is 10G, it
+      will use 10 reducers.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.vectorized.execution.enabled</name>
+    <value>false</value>
+    <description>This flag controls the vectorized mode of query execution as documented in HIVE-4160 (as of Hive 0.13.0)
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.reducededuplication</name>
+    <value>true</value>
+    <description>Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.index.filter</name>
+    <value>true</value>
+    <description>
+    Whether to enable automatic use of indexes
+    </description>
+  </property>
+
+  <property>
+    <name>hive.server2.thrift.port</name>
+    <value>10000</value>
+    <description>
+      TCP port number to listen on, default 10000.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.server2.support.dynamic.service.discovery</name>
+    <value>false</value>
+    <description>Whether HiveServer2 supports dynamic service discovery for its
+      clients. To support this, each instance of HiveServer2 currently uses
+      ZooKeeper to register itself, when it is brought up. JDBC/ODBC clients
+      should use the ZooKeeper ensemble: hive.zookeeper.quorum in their
+      connection string.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.server2.zookeeper.namespace</name>
+    <value>hiveserver2</value>
+    <description>The parent node in ZooKeeper used by HiveServer2 when
+      supporting dynamic service discovery.
+    </description>
+  </property>
+  
+</configuration>
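
The metastore alert defined earlier derives its host and port from
hive.metastore.uris, falling back to its default_port of 9083 when the URI
omits one. A small sketch of one way to do that parsing (Python 2 style,
matching the era of these package scripts):

from urlparse import urlparse

def metastore_endpoint(uris, default_port=9083):
    # e.g. uris = 'thrift://localhost:9083' (possibly a comma-separated list)
    first = uris.split(',')[0].strip()
    parsed = urlparse(first)
    return parsed.hostname, parsed.port or default_port

print(metastore_endpoint('thrift://localhost:9083'))  # ('localhost', 9083)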

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/webhcat-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/webhcat-env.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/webhcat-env.xml
new file mode 100644
index 0000000..14a473f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/webhcat-env.xml
@@ -0,0 +1,54 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- webhcat-env.sh -->
+  <property>
+    <name>content</name>
+    <description>webhcat-env.sh content</description>
+    <value>
+# The file containing the running pid
+PID_FILE={{webhcat_pid_file}}
+
+TEMPLETON_LOG_DIR={{templeton_log_dir}}/
+
+
+WEBHCAT_LOG_DIR={{templeton_log_dir}}/
+
+# The console error log
+ERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log
+
+# The console log
+CONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log
+
+#TEMPLETON_JAR=templeton_jar_name
+
+#HADOOP_PREFIX=hadoop_prefix
+
+#HCAT_PREFIX=hive_prefix
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+export HADOOP_HOME={{hadoop_home}}
+    </value>
+  </property>
+  
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/webhcat-site.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/webhcat-site.xml
new file mode 100644
index 0000000..335048e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/configuration/webhcat-site.xml
@@ -0,0 +1,135 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- 
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<!-- The default settings for Templeton. -->
+<!-- Edit templeton-site.xml to change settings for your local -->
+<!-- install. -->
+
+<configuration supports_final="true">
+
+  <property>
+    <name>templeton.port</name>
+    <value>50111</value>
+    <description>The HTTP port for the main server.</description>
+  </property>
+
+  <property>
+    <name>templeton.hadoop.conf.dir</name>
+    <value>/etc/hadoop/conf</value>
+    <description>The path to the Hadoop configuration.</description>
+  </property>
+
+  <property>
+    <name>templeton.jar</name>
+    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
+    <description>The path to the Templeton jar file.</description>
+  </property>
+
+  <property>
+    <name>templeton.libjars</name>
+    <value>/usr/lib/zookeeper/zookeeper.jar</value>
+    <description>Jars to add to the classpath.</description>
+  </property>
+
+  <property>
+    <name>templeton.hadoop</name>
+    <value>/usr/bin/hadoop</value>
+    <description>The path to the Hadoop executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.pig.archive</name>
+    <value>hdfs:///apps/webhcat/pig.tar.gz</value>
+    <description>The path to the Pig archive.</description>
+  </property>
+
+  <property>
+    <name>templeton.pig.path</name>
+    <value>pig.tar.gz/pig/bin/pig</value>
+    <description>The path to the Pig executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.hcat</name>
+    <value>/usr/bin/hcat</value>
+    <description>The path to the hcatalog executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.hive.archive</name>
+    <value>hdfs:///apps/webhcat/hive.tar.gz</value>
+    <description>The path to the Hive archive.</description>
+  </property>
+
+  <property>
+    <name>templeton.hive.home</name>
+    <value>hive.tar.gz/hive</value>
+    <description>The path to the Hive home within the tar. Has no effect if templeton.hive.archive is not set.</description>
+  </property>
+
+  <property>
+    <name>templeton.hcat.home</name>
+    <value>hive.tar.gz/hive/hcatalog</value>
+    <description>The path to the HCat home within the tar. Has no effect if templeton.hive.archive is not set.</description>
+  </property>
+
+  <property>
+    <name>templeton.hive.path</name>
+    <value>hive.tar.gz/hive/bin/hive</value>
+    <description>The path to the Hive executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.hive.properties</name>
+    <value>hive.metastore.local=false, hive.metastore.uris=thrift://localhost:9933, hive.metastore.sasl.enabled=false</value>
+    <description>Properties to set when running hive.</description>
+  </property>
+
+
+  <property>
+    <name>templeton.zookeeper.hosts</name>
+    <value>localhost:2181</value>
+    <description>ZooKeeper servers, as comma-separated host:port pairs</description>
+  </property>
+
+  <property>
+    <name>templeton.storage.class</name>
+    <value>org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage</value>
+    <description>The class to use as storage</description>
+  </property>
+
+  <property>
+    <name>templeton.override.enabled</name>
+    <value>false</value>
+    <description>Enable the override path in templeton.override.jars</description>
+  </property>
+
+  <property>
+    <name>templeton.streaming.jar</name>
+    <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
+    <description>The hdfs path to the Hadoop streaming jar file.</description>
+  </property> 
+
+  <property>
+    <name>templeton.exec.timeout</name>
+    <value>60000</value>
+    <description>Timeout for the Templeton API.</description>
+  </property>
+
+</configuration>
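
templeton.port above is the HTTP port that the WebHCat server status alert
probes; WebHCat exposes its health at the REST endpoint /templeton/v1/status.
A minimal probe sketch (Python 2 urllib2, to match these scripts; error
handling is deliberately coarse):

import json
import urllib2

def webhcat_ok(host, port=50111, timeout=5):
    # GET the WebHCat status document and check for {"status": "ok", ...}.
    url = 'http://%s:%d/templeton/v1/status' % (host, port)
    try:
        response = urllib2.urlopen(url, timeout=timeout)
        return json.loads(response.read()).get('status') == 'ok'
    except (urllib2.URLError, ValueError):
        return False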

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/etc/hive-schema-0.12.0.mysql.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/etc/hive-schema-0.12.0.mysql.sql b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/etc/hive-schema-0.12.0.mysql.sql
new file mode 100644
index 0000000..b0415b1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/etc/hive-schema-0.12.0.mysql.sql
@@ -0,0 +1,777 @@
+-- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
+--
+-- Host: localhost    Database: test
+-- ------------------------------------------------------
+-- Server version	5.5.25
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+
+--
+-- Table structure for table `BUCKETING_COLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `BUCKETING_COLS_N49` (`SD_ID`),
+  CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `CDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `CDS` (
+  `CD_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `COLUMNS_V2`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
+  `CD_ID` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE_NAME` varchar(4000) DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
+  KEY `COLUMNS_V2_N49` (`CD_ID`),
+  CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DATABASE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
+  KEY `DATABASE_PARAMS_N49` (`DB_ID`),
+  CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DBS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DBS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`),
+  UNIQUE KEY `UNIQUE_DATABASE` (`NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DB_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
+  `DB_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_GRANT_ID`),
+  UNIQUE KEY `DBPRIVILEGEINDEX` (`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `DB_PRIVS_N49` (`DB_ID`),
+  CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `GLOBAL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
+  `USER_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`USER_GRANT_ID`),
+  UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `IDXS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `IDXS` (
+  `INDEX_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DEFERRED_REBUILD` bit(1) NOT NULL,
+  `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INDEX_TBL_ID` bigint(20) DEFAULT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `ORIG_TBL_ID` bigint(20) DEFAULT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`INDEX_ID`),
+  UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`),
+  KEY `IDXS_N51` (`SD_ID`),
+  KEY `IDXS_N50` (`INDEX_TBL_ID`),
+  KEY `IDXS_N49` (`ORIG_TBL_ID`),
+  CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+  CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `INDEX_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` (
+  `INDEX_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`),
+  KEY `INDEX_PARAMS_N49` (`INDEX_ID`),
+  CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `NUCLEUS_TABLES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` (
+  `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`CLASS_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITIONS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITIONS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`PART_ID`),
+  UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
+  KEY `PARTITIONS_N49` (`TBL_ID`),
+  KEY `PARTITIONS_N50` (`SD_ID`),
+  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`),
+  CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `PARTITIONS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_EVENTS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` (
+  `PART_NAME_ID` bigint(20) NOT NULL,
+  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `EVENT_TIME` bigint(20) NOT NULL,
+  `EVENT_TYPE` int(11) NOT NULL,
+  `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_NAME_ID`),
+  KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEYS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`TBL_ID`,`PKEY_NAME`),
+  KEY `PARTITION_KEYS_N49` (`TBL_ID`),
+  CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEY_VALS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`PART_ID`,`INTEGER_IDX`),
+  KEY `PARTITION_KEY_VALS_N49` (`PART_ID`),
+  CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_ID`,`PARAM_KEY`),
+  KEY `PARTITION_PARAMS_N49` (`PART_ID`),
+  CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` (
+  `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_ID` bigint(20) DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_COLUMN_GRANT_ID`),
+  KEY `PART_COL_PRIVS_N49` (`PART_ID`),
+  KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
+  `PART_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_ID` bigint(20) DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_GRANT_ID`),
+  KEY `PARTPRIVILEGEINDEX` (`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `PART_PRIVS_N49` (`PART_ID`),
+  CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLES` (
+  `ROLE_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`ROLE_ID`),
+  UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLE_MAP`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLE_MAP` (
+  `ROLE_GRANT_ID` bigint(20) NOT NULL,
+  `ADD_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ROLE_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`ROLE_GRANT_ID`),
+  UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `ROLE_MAP_N49` (`ROLE_ID`),
+  CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SDS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `CD_ID` bigint(20) DEFAULT NULL,
+  `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `IS_COMPRESSED` bit(1) NOT NULL,
+  `IS_STOREDASSUBDIRECTORIES` bit(1) NOT NULL,
+  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `NUM_BUCKETS` int(11) NOT NULL,
+  `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SERDE_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`),
+  KEY `SDS_N49` (`SERDE_ID`),
+  KEY `SDS_N50` (`CD_ID`),
+  CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
+  CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SD_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SD_PARAMS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`,`PARAM_KEY`),
+  KEY `SD_PARAMS_N49` (`SD_ID`),
+  CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SEQUENCE_TABLE`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` (
+  `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `NEXT_VAL` bigint(20) NOT NULL,
+  PRIMARY KEY (`SEQUENCE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDES` (
+  `SERDE_ID` bigint(20) NOT NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` (
+  `SERDE_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`),
+  KEY `SERDE_PARAMS_N49` (`SERDE_ID`),
+  CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_NAMES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` (
+  `SD_ID` bigint(20) NOT NULL,
+  `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `SKEWED_COL_NAMES_N49` (`SD_ID`),
+  CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_VALUE_LOC_MAP`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` (
+  `SD_ID` bigint(20) NOT NULL,
+  `STRING_LIST_ID_KID` bigint(20) NOT NULL,
+  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`),
+  KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`),
+  KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`),
+  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` (
+  `STRING_LIST_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST_VALUES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` (
+  `STRING_LIST_ID` bigint(20) NOT NULL,
+  `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`),
+  KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_VALUES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` (
+  `SD_ID_OID` bigint(20) NOT NULL,
+  `STRING_LIST_ID_EID` bigint(20) NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`),
+  KEY `SKEWED_VALUES_N50` (`SD_ID_OID`),
+  KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`),
+  CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SORT_COLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SORT_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ORDER` int(11) NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `SORT_COLS_N49` (`SD_ID`),
+  CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TABLE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`TBL_ID`,`PARAM_KEY`),
+  KEY `TABLE_PARAMS_N49` (`TBL_ID`),
+  CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBLS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `RETENTION` int(11) NOT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `VIEW_EXPANDED_TEXT` mediumtext,
+  `VIEW_ORIGINAL_TEXT` mediumtext,
+  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_ID`),
+  UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
+  KEY `TBLS_N50` (`SD_ID`),
+  KEY `TBLS_N49` (`DB_ID`),
+  KEY `TBLS_N51` (`LINK_TARGET_ID`),
+  CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`),
+  CONSTRAINT `TBLS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
+  `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_COLUMN_GRANT_ID`),
+  KEY `TABLECOLUMNPRIVILEGEINDEX` (`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `TBL_COL_PRIVS_N49` (`TBL_ID`),
+  CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_PRIVS` (
+  `TBL_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_GRANT_ID`),
+  KEY `TBL_PRIVS_N49` (`TBL_ID`),
+  KEY `TABLEPRIVILEGEINDEX` (`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TAB_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TBL_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table `PART_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PART_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table `TYPES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPES` (
+  `TYPES_ID` bigint(20) NOT NULL,
+  `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`TYPES_ID`),
+  UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TYPE_FIELDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` (
+  `TYPE_NAME` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`),
+  KEY `TYPE_FIELDS_N49` (`TYPE_NAME`),
+  CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
+CREATE TABLE IF NOT EXISTS `MASTER_KEYS` 
+(
+    `KEY_ID` INTEGER NOT NULL AUTO_INCREMENT,
+    `MASTER_KEY` VARCHAR(767) BINARY NULL,
+    PRIMARY KEY (`KEY_ID`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
+-- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS`
+(
+    `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL,
+    `TOKEN` VARCHAR(767) BINARY NULL,
+    PRIMARY KEY (`TOKEN_IDENT`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for VERSION
+--
+CREATE TABLE IF NOT EXISTS `VERSION` (
+  `VER_ID` BIGINT NOT NULL,
+  `SCHEMA_VERSION` VARCHAR(127) NOT NULL,
+  `VERSION_COMMENT` VARCHAR(255),
+  PRIMARY KEY (`VER_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.12.0', 'Hive release version 0.12.0');
+
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2012-08-23  0:56:31
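
A quick sanity check after loading this script is to query the VERSION table it
populates; this is an illustrative query only, and the metastore database name
("hive" below) is an assumption:

    USE hive;  -- database name is an assumption
    SELECT SCHEMA_VERSION, VERSION_COMMENT FROM VERSION WHERE VER_ID = 1;
    -- Expected for this script: '0.12.0', 'Hive release version 0.12.0'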


[22/37] ambari git commit: AMBARI-8745: Common Services: Refactor HDP2.0.6 FLUME, GANGLIA, HBASE services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/status_params.py
new file mode 100644
index 0000000..0c69ca9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/status_params.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+pid_dir = config['configurations']['ganglia-env']['ganglia_runtime_dir']
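
A minimal sketch (not part of this patch) of how a status command might consume
pid_dir; it assumes the monitor pid file is named "gmond.pid" and uses
check_process_status from the resource_management library shown above:

    from resource_management import *
    import status_params

    def status(env):
        # Raises ComponentIsNotRunning if the pid file is absent or stale.
        check_process_status(status_params.pid_dir + "/gmond.pid")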

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/ganglia.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/ganglia.conf.j2 b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/ganglia.conf.j2
new file mode 100644
index 0000000..a08fb31
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/ganglia.conf.j2
@@ -0,0 +1,34 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+Alias /ganglia "{{ganglia_web_path}}"
+
+<Directory "{{ganglia_web_path}}">
+#  SSLRequireSSL
+   Options None
+   AllowOverride None
+   Order allow,deny
+   Allow from all
+#  Order deny,allow
+#  Deny from all
+#  Allow from 127.0.0.1
+#  AuthName "Ganglia Access"
+#  AuthType Basic
+#  AuthUserFile /etc/ganglia/htpasswd.users
+#  Require valid-user
+</Directory>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/gangliaClusters.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/gangliaClusters.conf.j2 b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/gangliaClusters.conf.j2
new file mode 100644
index 0000000..ffb4e84
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/gangliaClusters.conf.j2
@@ -0,0 +1,43 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#########################################################
+### ClusterName           GmondMasterHost   GmondPort ###
+#########################################################
+
+{% for x in ganglia_clusters %}
+    {{ x[0] }}       	  {{ganglia_server_host}}  {{ x[1] }}
+{% endfor %}
\ No newline at end of file
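
For reference, with two clusters configured the rendered file would look roughly
like this (the cluster names, host, and port numbers below are illustrative):

    #########################################################
    ### ClusterName           GmondMasterHost   GmondPort ###
    #########################################################

    HDPNameNode        ganglia-server.example.com  8661
    HDPSlaves          ganglia-server.example.com  8660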

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/gangliaEnv.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/gangliaEnv.sh.j2 b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/gangliaEnv.sh.j2
new file mode 100644
index 0000000..0b68623
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/gangliaEnv.sh.j2
@@ -0,0 +1,46 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Unix users and groups for the binaries we start up.
+RRD_ROOTDIR={{rrdcached_base_dir}}
+GMETAD_USER={{gmetad_user}};
+GMOND_USER={{gmond_user}};
+WEBSERVER_GROUP={{webserver_group}};
+MODULES_DIR={{modules_dir}}
+GANGLIA_WEB_PATH={{ganglia_web_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/gangliaLib.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/gangliaLib.sh.j2 b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/gangliaLib.sh.j2
new file mode 100644
index 0000000..6c24c7f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/gangliaLib.sh.j2
@@ -0,0 +1,85 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+GANGLIA_CONF_DIR={{ganglia_conf_dir}};
+GANGLIA_RUNTIME_DIR={{ganglia_runtime_dir}};
+RRDCACHED_BASE_DIR={{rrdcached_base_dir}};
+RRDCACHED_WRITE_THREADS={{rrdcached_write_threads}}
+RRDCACHED_TIMEOUT={{rrdcached_timeout}}
+RRDCACHED_FLUSH_TIMEOUT={{rrdcached_flush_timeout}}
+RRDCACHED_DELAY={{rrdcached_delay}}
+
+# This file contains all the info about each Ganglia Cluster in our Grid.
+GANGLIA_CLUSTERS_CONF_FILE=./gangliaClusters.conf;
+
+function createDirectory()
+{
+    directoryPath=${1};
+
+    if [ "x" != "x${directoryPath}" ]
+    then
+        mkdir -p ${directoryPath};
+    fi
+}
+
+function getGangliaClusterInfo()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # Fetch the particular entry for ${clusterName} from ${GANGLIA_CLUSTERS_CONF_FILE}.
+        awk -v clusterName=${clusterName} '($1 !~ /^#/) && ($1 == clusterName)' ${GANGLIA_CLUSTERS_CONF_FILE};
+    else
+        # Spit out all the non-comment, non-empty lines from ${GANGLIA_CLUSTERS_CONF_FILE}.
+        awk '($1 !~ /^#/) && (NF)' ${GANGLIA_CLUSTERS_CONF_FILE};
+    fi
+}
+
+function getConfiguredGangliaClusterNames()
+{
+  # Find all the subdirectories in ${GANGLIA_CONF_DIR} and extract only 
+  # the subdirectory name from each.
+  if [ -e ${GANGLIA_CONF_DIR} ]
+  then  
+    find ${GANGLIA_CONF_DIR} -maxdepth 1 -mindepth 1 -type d | xargs -n1 basename;
+  fi
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/rrd.py.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/rrd.py.j2 b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/rrd.py.j2
new file mode 100644
index 0000000..65d70e2
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/templates/rrd.py.j2
@@ -0,0 +1,361 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+# NOTE: This script is executed by Python 2.4 on CentOS 5.
+# Make sure your changes are compatible.
+
+import cgi
+import glob
+import os
+import re
+import rrdtool
+import sys
+import time
+import urlparse
+
+# place this script in /var/www/cgi-bin of the Ganglia collector
+# requires 'yum install rrdtool-python' on the Ganglia collector
+'''
+  Loads rrd file info
+'''
+def loadRRDData(file, cf, start, end, resolution):
+  args = [file, cf, "--daemon", "unix:{{ganglia_runtime_dir}}/rrdcached.limited.sock"]
+
+  if start is not None:
+    args.extend(["-s", start])
+  else:
+    args.extend(["-s", "now-10m"])
+
+  if end is not None:
+    args.extend(["-e", end])
+
+  if resolution is not None:
+    args.extend(["-r", resolution])
+
+  return rrdtool.fetch(args)
+
+'''
+  Collects metrics across several matching filenames.
+'''
+def collectStatMetrics(clusterName, hostName, metricName, files, cf, start, end, resolution):
+  # Use equality, not identity: 'is not' compares object identity and only
+  # works here by accident of string interning.
+  if clusterName[0] != '/':
+    clusterName.insert(0, '/')
+
+  metricParts = metricName.split('.')
+
+  # already know there's at least one
+  metricStat = metricParts[-1]
+  metricName = '.'.join(metricParts[:-1])
+
+  isRate = False
+  if len(metricParts) > 1 and metricParts[-2] == '_rate':
+    isRate = True
+    metricName = '.'.join(metricParts[:-2])
+
+  pattern = re.compile(metricName + '\.rrd$')
+  matchedFiles = filter(pattern.match, files)
+
+  parentPath = os.path.join(*clusterName)
+
+  actualFiles = []
+  for matchedFile in matchedFiles:
+    if hostName != "__SummaryInfo__":
+      osFiles = glob.glob(os.path.join(parentPath, hostName, matchedFile))
+    else:
+      osFiles = glob.glob(os.path.join(parentPath, '*', matchedFile))
+
+    for f in osFiles:
+      if -1 == f.find("__SummaryInfo__"):
+        actualFiles.append(f)
+
+  if len(actualFiles) == 0:
+    return
+
+  '''
+  [
+    {
+      "step_value": update each iteration
+      "count": increase by 1 each iteration
+      "sum": increase by value each iteration
+      "avg": update each iteration as sum/count
+      "min": update each iteration if step_value < old min OR min is missing (first time)
+      "max": update each iteration if step_value > old max OR max is missing (first time)
+    }
+  ]
+  '''
+
+  timestamp = None
+  stepsize = None
+  concreteMetricName = None
+  vals = None # values across all files
+
+  for file in actualFiles:
+    rrdMetric = loadRRDData(file, cf, start, end, resolution)
+    
+    if timestamp is None and stepsize is None and concreteMetricName is None:
+      timestamp = rrdMetric[0][0]
+      stepsize = rrdMetric[0][2]
+      
+      if not isRate:
+        suffix = metricStat
+      else:
+        suffix = '_rate.' + metricStat
+      
+      concreteMetricName = file.split(os.sep).pop().replace('rrd', suffix)
+
+    metricValues = rrdMetric[2]
+
+    if vals is None:
+      vals = [None] * len(metricValues)
+
+    i = 0
+    for tuple in metricValues:
+      if vals[i] is None:
+        vals[i] = {}
+        vals[i]['count'] = 0
+        vals[i]['_sum'] = 0.0
+        vals[i]['_avg'] = 0.0
+        vals[i]['_min'] = 999999999999.99
+        vals[i]['_max'] = 0.0
+
+      rawValue = tuple[0]
+      vals[i]['step_value'] = rawValue
+      if rawValue is None:
+        i += 1
+        continue
+
+      if isRate:
+        if 0 == i:
+          rawValue = 0.0
+        elif vals[i-1]['step_value'] is None:
+          rawValue = 0.0
+        else:
+          rawValue = (rawValue - vals[i-1]['step_value']) / stepsize
+      
+      vals[i]['count'] += 1 
+      vals[i]['_sum'] += rawValue
+
+      vals[i]['_avg'] = vals[i]['_sum']/vals[i]['count']
+
+      if rawValue < vals[i]['_min']:
+        vals[i]['_min'] = rawValue
+
+      if rawValue > vals[i]['_max']:
+        vals[i]['_max'] = rawValue
+      
+      i += 1
+
+  sys.stdout.write("sum\n")
+  sys.stdout.write(clusterName[-1] + "\n")
+  sys.stdout.write(hostName + "\n")
+  sys.stdout.write(concreteMetricName + "\n")
+  sys.stdout.write(str(timestamp) + "\n")
+  sys.stdout.write(str(stepsize) + "\n")
+
+  for val in vals:
+    if val['step_value'] is None:
+      sys.stdout.write("[~n]")
+    else:
+      sys.stdout.write(str(val[metricStat]))
+    sys.stdout.write("\n")
+
+  sys.stdout.write("[~EOM]\n")
+
+  return
+
+def printMetric(clusterName, hostName, metricName, file, cf, start, end,
+                resolution, pointInTime):
+  if clusterName.endswith("rrds"):
+    clusterName = ""
+ 
+  rrdMetric = loadRRDData(file, cf, start, end, resolution)
+
+  # ds_name
+  sys.stdout.write(rrdMetric[1][0])
+  sys.stdout.write("\n")
+
+  sys.stdout.write(clusterName)
+  sys.stdout.write("\n")
+  sys.stdout.write(hostName)
+  sys.stdout.write("\n")
+  sys.stdout.write(metricName)
+  sys.stdout.write("\n")
+
+  # write time
+  sys.stdout.write(str(rrdMetric[0][0]))
+  sys.stdout.write("\n")
+  # write step
+  sys.stdout.write(str(rrdMetric[0][2]))
+  sys.stdout.write("\n")
+
+  if not pointInTime:
+    valueCount = 0
+    lastValue = None
+
+    for tuple in rrdMetric[2]:
+
+      thisValue = tuple[0]
+
+      if valueCount > 0 and thisValue == lastValue:
+        valueCount += 1
+      else:
+        if valueCount > 1:
+          sys.stdout.write("[~r]")
+          sys.stdout.write(str(valueCount))
+          sys.stdout.write("\n")
+
+        if thisValue is None:
+          sys.stdout.write("[~n]\n")
+        else:
+          sys.stdout.write(str(thisValue))
+          sys.stdout.write("\n")
+
+        valueCount = 1
+        lastValue = thisValue
+  else:
+    value = None
+    idx = -1
+    tuple = rrdMetric[2]
+    tupleLastIdx = len(tuple) * -1
+
+    while value is None and idx >= tupleLastIdx:
+      value = tuple[idx][0]
+      idx -= 1
+
+    if value is not None:
+      sys.stdout.write(str(value))
+      sys.stdout.write("\n")
+
+  sys.stdout.write("[~EOM]\n")
+  return
+
+
+def stripList(l):
+  return ([x.strip() for x in l])
+
+
+sys.stdout.write("Content-type: text/plain\n\n")
+
+# write start time
+sys.stdout.write(str(time.mktime(time.gmtime())))
+sys.stdout.write("\n")
+
+requestMethod = os.environ['REQUEST_METHOD']
+
+if requestMethod == 'POST':
+  postData = sys.stdin.readline()
+  queryString = cgi.parse_qs(postData)
+  queryString = dict((k, v[0]) for k, v in queryString.items())
+elif requestMethod == 'GET':
+  queryString = dict(cgi.parse_qsl(os.environ['QUERY_STRING']))
+
+if "m" in queryString:
+  metricParts = queryString["m"].split(",")
+else:
+  metricParts = [""]
+metricParts = stripList(metricParts)
+
+hostParts = []
+if "h" in queryString:
+  hostParts = queryString["h"].split(",")
+hostParts = stripList(hostParts)
+
+if "c" in queryString:
+  clusterParts = queryString["c"].split(",")
+else:
+  clusterParts = [""]
+clusterParts = stripList(clusterParts)
+
+if "p" in queryString:
+  rrdPath = queryString["p"]
+else:
+  rrdPath = "{{rrdcached_base_dir}}"
+
+start = None
+if "s" in queryString:
+  start = queryString["s"]
+
+end = None
+if "e" in queryString:
+  end = queryString["e"]
+
+resolution = None
+if "r" in queryString:
+  resolution = queryString["r"]
+
+if "cf" in queryString:
+  cf = queryString["cf"]
+else:
+  cf = "AVERAGE"
+
+if "pt" in queryString:
+  pointInTime = True
+else:
+  pointInTime = False
+
+
+def _walk(*args, **kwargs):
+  for root, dirs, files in os.walk(*args, **kwargs):
+    for dir in dirs:
+      qualified_dir = os.path.join(root, dir)
+      if os.path.islink(qualified_dir):
+        for x in os.walk(qualified_dir, **kwargs):
+          yield x
+    yield (root, dirs, files)
+
+
+for cluster in clusterParts:
+  for path, dirs, files in _walk(os.path.join(rrdPath,cluster)):
+    pathParts = path.split("/")
+    # Process only paths that contain files. If no host parameter was passed,
+    # process every host folder plus the summary info; otherwise process only
+    # the requested host's folder.
+    if len(files) > 0 and (len(hostParts) == 0 or pathParts[-1] in hostParts):
+      for metric in metricParts:
+        file = metric + ".rrd"
+        fileFullPath = os.path.join(path, file)
+        if os.path.exists(fileFullPath):
+          # The metric name matched an rrd file exactly.
+          printMetric(pathParts[-2], pathParts[-1], file[:-4],
+                      os.path.join(path, file), cf, start, end, resolution,
+                      pointInTime)
+        else:
+          need_stats = False
+          parts = metric.split(".")
+          if len(parts) > 0 and parts[-1] in ['_min', '_max', '_avg', '_sum']:
+              need_stats = True
+
+          if need_stats and not pointInTime:
+            collectStatMetrics(pathParts[:-1], pathParts[-1], metric, files, cf, start, end, resolution)
+          else:
+            # Treat the metric name as a regular expression.
+            metricRegex = metric + '\.rrd$'
+            p = re.compile(metricRegex)
+            matchedFiles = filter(p.match, files)
+            for matchedFile in matchedFiles:
+              printMetric(pathParts[-2], pathParts[-1], matchedFile[:-4],
+                         os.path.join(path, matchedFile), cf, start, end,
+                         resolution, pointInTime)
+
+sys.stdout.write("[~EOF]\n")
+# write end time
+sys.stdout.write(str(time.mktime(time.gmtime())))
+sys.stdout.write("\n")
+
+sys.stdout.flush()
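
A minimal client sketch (not part of the template) showing the query interface
the script above serves; the cluster, host, and metric values are illustrative
assumptions, and the script is assumed to be reachable under /cgi-bin/rrd.py on
the Ganglia collector:

    import urllib

    params = urllib.urlencode({
        "c": "HDPSlaves",            # cluster name (assumed)
        "h": "host1.example.com",    # omit to cover all host folders
        "m": "cpu_user",             # exact metric, or a regex like cpu_.*
        "s": "now-1h",               # start time
        "cf": "AVERAGE",             # consolidation function
    })
    print urllib.urlopen("http://ganglia.example.com/cgi-bin/rrd.py?" + params).read()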

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/alerts.json b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/alerts.json
new file mode 100644
index 0000000..fa911e1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/alerts.json
@@ -0,0 +1,124 @@
+{
+  "HBASE": {
+    "service": [
+      {
+        "name": "hbase_regionserver_process_percent",
+        "label": "Percent RegionServers Available",
+        "description": "This service-level alert is triggered if the configured percentage of RegionServer processes cannot be determined to be up and listening on the network for the configured warning and critical thresholds. It aggregates the results of RegionServer process down checks.",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "AGGREGATE",
+          "alert_name": "hbase_regionserver_process",
+          "reporting": {
+            "ok": {
+              "text": "affected: [{1}], total: [{0}]"
+            },
+            "warning": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.1
+            },
+            "critical": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.3
+            }
+          }
+        }
+      }    
+    ],
+    "HBASE_MASTER": [
+      {
+        "name": "hbase_master_process",
+        "label": "HBase Master Process",
+        "description": "This alert is triggered if the HBase master processes cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "PORT",
+          "uri": "{{hbase-site/hbase.master.port}}",
+          "default_port": 60000,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      },
+      {
+        "name": "hbase_master_cpu",
+        "label": "HBase Maser CPU Utilization",
+        "description": "This host-level alert is triggered if CPU utilization of the HBase Master exceeds certain warning and critical thresholds. It checks the HBase Master JMX Servlet for the SystemCPULoad property. The threshold values are in percent.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hbase-site/hbase.master.info.port}}",
+            "https": "{{hbase-site/hbase.master.info.port}}",
+            "https_property": "{{cluster-env/security_enabled}}",
+            "https_property_value": "true",
+            "default_port": 60010
+          },
+          "reporting": {
+            "ok": {
+              "text": "{1} CPU, load {0:.1%}"
+            },
+            "warning": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 200
+            },
+            "critical": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 250
+            },
+            "units" : "%"
+          },
+          "jmx": {
+            "property_list": [
+              "java.lang:type=OperatingSystem/SystemCpuLoad",
+              "java.lang:type=OperatingSystem/AvailableProcessors"
+            ],
+            "value": "{0} * 100"
+          }
+        }
+      }
+    ],
+    "HBASE_REGIONSERVER": [
+      {
+        "name": "hbase_regionserver_process",
+        "label": "HBase RegionServer Process",
+        "description": "This host-level alert is triggered if the RegionServer processes cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
+        "interval": 1,
+        "scope": "HOST",
+        "source": {
+          "type": "PORT",
+          "uri": "{{hbase-site/hbase.regionserver.info.port}}",
+          "default_port": 60030,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
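
For clarity, the AGGREGATE source above compares the fraction of affected
RegionServers to the warning (0.1) and critical (0.3) values. A rough
illustration of that interpretation (a sketch, not Ambari's actual
implementation):

    def aggregate_state(affected, total, warning=0.1, critical=0.3):
        if not total:
            return "OK"
        ratio = float(affected) / total
        if ratio >= critical:
            return "CRITICAL"
        if ratio >= warning:
            return "WARNING"
        return "OK"

    print aggregate_state(2, 10)  # 20% affected -> WARNING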

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-env.xml b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-env.xml
new file mode 100644
index 0000000..231baa6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-env.xml
@@ -0,0 +1,135 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hbase_log_dir</name>
+    <value>/var/log/hbase</value>
+    <description>Log Directories for HBase.</description>
+  </property>
+  <property>
+    <name>hbase_pid_dir</name>
+    <value>/var/run/hbase</value>
+    <description>Pid Directory for HBase.</description>
+  </property>
+  <property>
+    <name>hbase_regionserver_heapsize</name>
+    <value>1024</value>
+    <description>HBase RegionServer Heap Size.</description>
+  </property>
+  <property>
+    <name>hbase_regionserver_xmn_max</name>
+    <value>512</value>
+    <description>HBase RegionServer maximum value for minimum heap size.</description>
+  </property>
+  <property>
+    <name>hbase_regionserver_xmn_ratio</name>
+    <value>0.2</value>
+    <description>HBase RegionServer minimum heap size is calculated as a percentage of max heap size.</description>
+  </property>
+  <property>
+    <name>hbase_master_heapsize</name>
+    <value>1024</value>
+    <description>HBase Master Heap Size</description>
+  </property>
+   <property>
+    <name>hbase_user</name>
+    <value>hbase</value>
+    <property-type>USER</property-type>
+    <description>HBase User Name.</description>
+  </property>
+
+  <!-- hbase-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for hbase-env.sh file</description>
+    <value>
+# Set environment variables here.
+
+# The java implementation to use. Java 1.6 required.
+export JAVA_HOME={{java64_home}}
+
+# HBase Configuration directory
+export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}
+
+# Extra Java CLASSPATH elements. Optional.
+export HBASE_CLASSPATH=${HBASE_CLASSPATH}
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+# export HBASE_HEAPSIZE=1000
+
+# Extra Java runtime options.
+# Below are what we set by default. May only work with SUN JVM.
+# For more on why as well as other possible settings,
+# see http://wiki.apache.org/hadoop/PerformanceTuning
+export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
+# Uncomment below to enable java garbage collection logging.
+# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
+
+# Uncomment and adjust to enable JMX exporting
+# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
+# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+#
+# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+# If you want to configure BucketCache, specify '-XX:MaxDirectMemorySize=' with a proper direct memory size
+# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
+
+# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
+export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
+
+# Extra ssh options. Empty by default.
+# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
+
+# Where log files are stored. $HBASE_HOME/logs by default.
+export HBASE_LOG_DIR={{log_dir}}
+
+# A string representing this instance of hbase. $USER by default.
+# export HBASE_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HBASE_NICENESS=10
+
+# The directory where pid files are stored. /tmp by default.
+export HBASE_PID_DIR={{pid_dir}}
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HBASE_SLAVE_SLEEP=0.1
+
+# Tell HBase whether it should manage its own instance of ZooKeeper or not.
+export HBASE_MANAGES_ZK=false
+
+{% if security_enabled %}
+export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}}"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}}"
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}}"
+{% else %}
+export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}"
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"
+{% endif %}
+    </value>
+  </property>
+
+</configuration>
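
For context on how the 'content' property above is consumed: Ambari command scripts conventionally render it as a Jinja template and write the result out as hbase-env.sh. A minimal sketch of that pattern follows; it is not part of this patch, and the params attribute names are illustrative assumptions:

    # Illustrative sketch, not from this patch: render the hbase-env
    # 'content' Jinja template and write it to the HBase conf dir.
    from resource_management import File, InlineTemplate

    def write_hbase_env(params):
      # params.hbase_env_sh_template is assumed to hold the 'content'
      # value above; InlineTemplate substitutes {{java64_home}},
      # {{log_dir}}, {{pid_dir}} and friends from the params module.
      File(params.hbase_conf_dir + "/hbase-env.sh",
           owner=params.hbase_user,
           content=InlineTemplate(params.hbase_env_sh_template))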

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-log4j.xml b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-log4j.xml
new file mode 100644
index 0000000..57b3845
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-log4j.xml
@@ -0,0 +1,143 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Define some default values that can be overridden by system properties
+hbase.root.logger=INFO,console
+hbase.security.logger=INFO,console
+hbase.log.dir=.
+hbase.log.file=hbase.log
+
+# Define the root logger to the system property "hbase.root.logger".
+log4j.rootLogger=${hbase.root.logger}
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
+
+# Rolling File Appender properties
+hbase.log.maxfilesize=256MB
+hbase.log.maxbackupindex=20
+
+# Rolling File Appender
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}
+
+log4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}
+log4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
+
+#
+# Security audit appender
+#
+hbase.security.log.file=SecurityAuth.audit
+hbase.security.log.maxfilesize=256MB
+hbase.security.log.maxbackupindex=20
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}
+log4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.category.SecurityLogger=${hbase.security.logger}
+log4j.additivity.SecurityLogger=false
+#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE
+
+#
+# Null Appender
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
+
+# Custom Logging levels
+
+log4j.logger.org.apache.zookeeper=INFO
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.hbase=DEBUG
+# Make these two classes INFO-level. Make them DEBUG to see more zk debug.
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO
+log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO
+#log4j.logger.org.apache.hadoop.dfs=DEBUG
+# Set this class to log INFO only, otherwise it's OTT
+# Enable this to get detailed connection error/retry logging.
+# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE
+
+
+# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)
+#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG
+
+# Uncomment the below if you want to remove logging of client region caching
+# and scan of .META. messages
+# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO
+# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO
+
+    </value>
+  </property>
+
+</configuration>
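
Unlike hbase-env, the hbase-log4j 'content' block above is plain log4j.properties text rather than a template, so deployment is just a file write. A hedged sketch of the usual pattern (again illustrative, not part of this patch; params.log4j_props is an assumed name):

    # Illustrative sketch: persist the hbase-log4j 'content' property
    # as log4j.properties in the HBase conf dir.
    from resource_management import File

    def write_hbase_log4j(params):
      if params.log4j_props:  # the 'content' value shown above
        File(params.hbase_conf_dir + "/log4j.properties",
             mode=0644,
             owner=params.hbase_user,
             content=params.log4j_props)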

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-policy.xml b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-policy.xml
new file mode 100644
index 0000000..2f12801
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-policy.xml
@@ -0,0 +1,53 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="true">
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HRegionInterface protocol implementations (i.e.
+    clients talking to HRegionServers).
+    The ACL is a comma-separated list of user and group names. The user
+    list is separated from the group list by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.admin.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HMasterInterface protocol implementation (i.e.
+    clients talking to HMaster for admin operations).
+    The ACL is a comma-separated list of user and group names. The user
+    list is separated from the group list by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.masterregion.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HMasterRegionInterface protocol implementations
+    (for HRegionServers communicating with HMaster).
+    The ACL is a comma-separated list of user and group names. The user
+    list is separated from the group list by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+</configuration>
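
The ACL syntax in these descriptions ("users groups", each list comma-separated, '*' for everyone) is easy to misread, so a small illustrative parser may help. This is an editor-supplied sketch of the documented format, not code from Hadoop or this patch:

    # Illustrative sketch: parse an hbase-policy ACL value such as
    # "alice,bob users,wheel". Users come first, then a blank, then groups.
    def parse_acl(acl):
      if acl.strip() == '*':
        return None  # special value: all users allowed
      parts = acl.split(' ', 1)
      users = [u for u in parts[0].split(',') if u]
      groups = [g for g in parts[1].split(',') if g] if len(parts) > 1 else []
      return users, groups

    # parse_acl("alice,bob users,wheel") -> (['alice', 'bob'], ['users', 'wheel'])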

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-site.xml b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-site.xml
new file mode 100644
index 0000000..84900d1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/configuration/hbase-site.xml
@@ -0,0 +1,331 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <property>
+    <name>hbase.rootdir</name>
+    <value>hdfs://localhost:8020/apps/hbase/data</value>
+    <description>The directory shared by region servers and into
+    which HBase persists.  The URL should be 'fully-qualified'
+    to include the filesystem scheme.  For example, to specify the
+    HDFS directory '/hbase' where the HDFS instance's namenode is
+    running at namenode.example.org on port 9000, set this value to:
+    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
+    into /tmp.  Change this configuration, or all data will be lost
+    on machine restart.
+    </description>
+  </property>
+  <property>
+    <name>hbase.cluster.distributed</name>
+    <value>true</value>
+    <description>The mode the cluster will be in. Possible values are
+      false for standalone mode and true for distributed mode.  If
+      false, startup will run all HBase and ZooKeeper daemons together
+      in the one JVM.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.port</name>
+    <value>60000</value>
+    <description>The port the HBase Master should bind to.</description>
+  </property>
+  <property>
+    <name>hbase.tmp.dir</name>
+    <value>/hadoop/hbase</value>
+    <description>Temporary directory on the local filesystem.
+    Change this setting to point to a location more permanent
+    than '/tmp' (The '/tmp' directory is often cleared on
+    machine restart).
+    </description>
+  </property>
+  <property>
+    <name>hbase.local.dir</name>
+    <value>${hbase.tmp.dir}/local</value>
+    <description>Directory on the local filesystem to be used as local storage.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.bindAddress</name>
+    <value>0.0.0.0</value>
+    <description>The bind address for the HBase Master web UI
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>60010</value>
+    <description>The port for the HBase Master web UI.</description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>60030</value>
+    <description>The port for the HBase RegionServer web UI.</description>
+  </property>
+  <property>
+    <name>hbase.regionserver.global.memstore.upperLimit</name>
+    <value>0.4</value>
+    <description>Maximum size of all memstores in a region server before new
+      updates are blocked and flushes are forced. Defaults to 40% of heap
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.handler.count</name>
+    <value>60</value>
+    <description>Count of RPC Listener instances spun up on RegionServers.
+    The same property is used by the Master for the count of master handlers.
+    The HBase default is 10.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.majorcompaction</name>
+    <value>86400000</value>
+    <description>The time (in milliseconds) between 'major' compactions of all
+    HStoreFiles in a region.  Default: 1 day.
+    Set to 0 to disable automated major compactions.
+    </description>
+  </property>
+  
+  <property>
+    <name>hbase.regionserver.global.memstore.lowerLimit</name>
+    <value>0.38</value>
+    <description>When memstores are being forced to flush to make room in
+      memory, keep flushing until we hit this mark. Defaults to 38% of heap.
+      This value equal to hbase.regionserver.global.memstore.upperLimit causes
+      the minimum possible flushing to occur when updates are blocked due to
+      memstore limiting.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.block.multiplier</name>
+    <value>2</value>
+    <description>Block updates if the memstore reaches hbase.hregion.memstore.block.multiplier
+    times hbase.hregion.flush.size bytes.  Useful for preventing
+    runaway memstores during spikes in update traffic.  Without an
+    upper bound, the memstore fills such that when it flushes, the
+    resultant flush files take a long time to compact or split, or,
+    worse, we OOME.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.flush.size</name>
+    <value>134217728</value>
+    <description>
+    Memstore will be flushed to disk if size of the memstore
+    exceeds this number of bytes.  Value is checked by a thread that runs
+    every hbase.server.thread.wakefrequency.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.mslab.enabled</name>
+    <value>true</value>
+    <description>
+      Enables the MemStore-Local Allocation Buffer,
+      a feature which works to prevent heap fragmentation under
+      heavy write loads. This can reduce the frequency of stop-the-world
+      GC pauses on large heaps.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value>10737418240</value>
+    <description>
+    Maximum HStoreFile size. If any one of a column family's HStoreFiles has
+    grown to exceed this value, the hosting HRegion is split in two.
+    Default: 10G.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.scanner.caching</name>
+    <value>100</value>
+    <description>Number of rows that will be fetched when calling next
+    on a scanner if it is not served from (local, client) memory. Higher
+    caching values will enable faster scanners but will eat up more memory
+    and some calls of next may take longer when the cache is empty.
+    Do not set this value such that the time between invocations is greater
+    than the scanner timeout, i.e. hbase.regionserver.lease.period.
+    </description>
+  </property>
+  <property>
+    <name>zookeeper.session.timeout</name>
+    <value>30000</value>
+    <description>ZooKeeper session timeout.
+      HBase passes this to the zk quorum as suggested maximum time for a
+      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
+      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
+      "The client sends a requested timeout, the server responds with the
+      timeout that it can give the client." In milliseconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.keyvalue.maxsize</name>
+    <value>10485760</value>
+    <description>Specifies the combined maximum allowed size of a KeyValue
+    instance. This is to set an upper boundary for a single entry saved in a
+    storage file. Since they cannot be split it helps avoiding that a region
+    cannot be split any further because the data is too large. It seems wise
+    to set this to a fraction of the maximum region size. Setting it to zero
+    or less disables the check.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hstore.compactionThreshold</name>
+    <value>3</value>
+    <description>
+    If more than this number of HStoreFiles in any one HStore
+    (one HStoreFile is written per flush of memstore) then a compaction
+    is run to rewrite all HStoreFiles files as one.  Larger numbers
+    put off compaction but when it runs, it takes longer to complete.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hstore.flush.retries.number</name>
+    <value>120</value>
+    <description>
+    The number of times the region flush operation will be retried.
+    </description>
+  </property>
+  
+  <property>
+    <name>hbase.hstore.blockingStoreFiles</name>
+    <value>10</value>
+    <description>
+    If more than this number of StoreFiles in any one Store
+    (one StoreFile is written per flush of MemStore) then updates are
+    blocked for this HRegion until a compaction is completed, or
+    until hbase.hstore.blockingWaitTime has been exceeded.
+    </description>
+  </property>
+  <property>
+    <name>hfile.block.cache.size</name>
+    <value>0.40</value>
+    <description>
+        Percentage of maximum heap (-Xmx setting) to allocate to the block cache
+        used by HFile/StoreFile. The value 0.40 here means allocate 40%.
+        Set to 0 to disable, but that is not recommended.
+    </description>
+  </property>
+
+  <!-- Additional configuration specific to HBase security -->
+  <property>
+    <name>hbase.superuser</name>
+    <value>hbase</value>
+    <description>List of users or groups (comma-separated), who are allowed
+    full privileges, regardless of stored ACLs, across the cluster.
+    Only used when HBase security is enabled.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.security.authentication</name>
+    <value>simple</value>
+    <description>Controls whether or not secure authentication is enabled for HBase. Possible values are 'simple'
+      (no authentication) and 'kerberos'.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.security.authorization</name>
+    <value>false</value>
+    <description>Enables HBase authorization when set to true. Set the value of this property to false to disable HBase authorization.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.coprocessor.region.classes</name>
+    <value></value>
+    <description>A comma-separated list of Coprocessors that are loaded by
+    default on all tables. For any overridden coprocessor method, these classes
+    will be called in order. After implementing your own Coprocessor, just put
+    it in HBase's classpath and add the fully qualified class name here.
+    A coprocessor can also be loaded on demand per table via HTableDescriptor.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.coprocessor.master.classes</name>
+    <value></value>
+    <description>A comma-separated list of
+      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
+      loaded by default on the active HMaster process. For any implemented
+      coprocessor methods, the listed classes will be called in order. After
+      implementing your own MasterObserver, just put it in HBase's classpath
+      and add the fully qualified class name here.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>2181</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+  </property>
+
+  <!--
+  The following three properties are used together to create the list of
+  host:peer_port:leader_port quorum servers for ZooKeeper.
+  -->
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value>localhost</value>
+    <description>Comma-separated list of servers in the ZooKeeper Quorum.
+    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+    By default this is set to localhost for local and pseudo-distributed modes
+    of operation. For a fully-distributed setup, this should be set to a full
+    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
+    this is the list of servers which we will start/stop ZooKeeper on.
+    </description>
+  </property>
+  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
+
+  <property>
+    <name>hbase.zookeeper.useMulti</name>
+    <value>true</value>
+    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
+    This allows certain ZooKeeper operations to complete more quickly and prevents some issues
+    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
+    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
+    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
+    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
+    </description>
+  </property>
+  <property>
+    <name>zookeeper.znode.parent</name>
+    <value>/hbase-unsecure</value>
+    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
+      files that are configured with a relative path will go under this node.
+      By default, all of HBase's ZooKeeper file paths are configured with a
+      relative path, so they will all go under this directory unless changed.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.defaults.for.version.skip</name>
+    <value>true</value>
+    <description>Disables version verification.</description>
+  </property>
+
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value>/var/lib/hadoop-hdfs/dn_socket</value>
+    <description>Path to domain socket.</description>
+  </property>
+
+</configuration>
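
Several of the ZooKeeper-related properties above (hbase.zookeeper.quorum, hbase.zookeeper.property.clientPort, zookeeper.znode.parent) combine into the connect string HBase clients use. A hedged sketch of how they fit together, assuming the defaults shipped in this file:

    # Illustrative sketch: derive the client-side ZooKeeper connect string
    # from an hbase-site.xml file on disk (path is the caller's choice).
    import xml.etree.ElementTree as ET

    def zk_connect_string(hbase_site_path):
      props = {}
      for prop in ET.parse(hbase_site_path).getroot().findall('property'):
        props[prop.findtext('name')] = prop.findtext('value') or ''
      hosts = props['hbase.zookeeper.quorum'].split(',')
      port = props['hbase.zookeeper.property.clientPort']
      # every HBase znode lives under zookeeper.znode.parent
      ensemble = ','.join('%s:%s' % (h, port) for h in hosts)
      return ensemble, props['zookeeper.znode.parent']

    # With the values above: ('localhost:2181', '/hbase-unsecure')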

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml
new file mode 100644
index 0000000..fd290df
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml
@@ -0,0 +1,144 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <displayName>HBase</displayName>
+      <comment>Non-relational distributed database and centralized service for configuration management &amp;
+        synchronization
+      </comment>
+      <version>0.96.0.2.0</version>
+      <components>
+        <component>
+          <name>HBASE_MASTER</name>
+          <displayName>HBase Master</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HBASE/HBASE_MASTER</co-locate>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/hbase_master.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/hbase_master.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>HBASE_REGIONSERVER</name>
+          <displayName>RegionServer</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/hbase_regionserver.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>HBASE_CLIENT</name>
+          <displayName>HBase Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/hbase_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>hbase-site.xml</fileName>
+              <dictionaryName>hbase-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hbase-env.sh</fileName>
+              <dictionaryName>hbase-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>hbase-policy.xml</fileName>
+              <dictionaryName>hbase-policy</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>log4j.properties</fileName>
+              <dictionaryName>hbase-log4j</dictionaryName>
+            </configFile>            
+          </configFiles>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hbase</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+      
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+        <service>HDFS</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>hbase-policy</config-type>
+        <config-type>hbase-site</config-type>
+        <config-type>hbase-env</config-type>
+        <config-type>hbase-log4j</config-type>
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>
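
Each <commandScript> entry in this metainfo names a Python file that the Ambari agent drives through lifecycle commands. The scripts themselves land elsewhere in this refactoring; as orientation, a hedged skeleton of what scripts/hbase_master.py conventionally looks like (method bodies here are placeholders, not the real implementation):

    # Illustrative skeleton only: the shape of a command script that
    # metainfo.xml's <script>scripts/hbase_master.py</script> refers to.
    from resource_management import Script

    class HbaseMaster(Script):
      def install(self, env):
        self.install_packages(env)  # pulls in the 'hbase' package declared above

      def configure(self, env):
        pass  # would render hbase-site.xml, hbase-env.sh, log4j.properties

      def start(self, env):
        self.configure(env)
        # would Execute the hbase-daemon.sh start master command

      def stop(self, env):
        pass  # would Execute the hbase-daemon.sh stop master command

      def status(self, env):
        pass  # would check the pid file under hbase_pid_dir

    if __name__ == "__main__":
      HbaseMaster().execute()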


[11/37] ambari git commit: AMBARI-8695: Common Services: Refactor HDP-2.0.6 HDFS, ZOOKEEPER services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/files/alert_checkpoint_time.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/files/alert_checkpoint_time.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/files/alert_checkpoint_time.py
new file mode 100644
index 0000000..410608f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/files/alert_checkpoint_time.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import time
+import urllib2
+import json
+
+LABEL = 'Last Checkpoint: [{h} hours, {m} minutes, {tx} transactions]'
+
+NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
+NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
+NN_HTTP_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'
+NN_CHECKPOINT_TX_KEY = '{{hdfs-site/dfs.namenode.checkpoint.txns}}'
+NN_CHECKPOINT_PERIOD_KEY = '{{hdfs-site/dfs.namenode.checkpoint.period}}'
+
+PERCENT_WARNING = 200
+PERCENT_CRITICAL = 200
+
+CHECKPOINT_TX_DEFAULT = 1000000
+CHECKPOINT_PERIOD_DEFAULT = 21600
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (NN_HTTP_ADDRESS_KEY, NN_HTTPS_ADDRESS_KEY, NN_HTTP_POLICY_KEY, 
+      NN_CHECKPOINT_TX_KEY, NN_CHECKPOINT_PERIOD_KEY)      
+  
+
+def execute(parameters=None, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  parameters (dictionary): a mapping of parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  if parameters is None:
+    return ('UNKNOWN', ['There were no parameters supplied to the script.'])
+  
+  uri = None
+  scheme = 'http'  
+  http_uri = None
+  https_uri = None
+  http_policy = 'HTTP_ONLY'
+  percent_warning = PERCENT_WARNING
+  percent_critical = PERCENT_CRITICAL
+  checkpoint_tx = CHECKPOINT_TX_DEFAULT
+  checkpoint_period = CHECKPOINT_PERIOD_DEFAULT
+  
+  if NN_HTTP_ADDRESS_KEY in parameters:
+    http_uri = parameters[NN_HTTP_ADDRESS_KEY]
+
+  if NN_HTTPS_ADDRESS_KEY in parameters:
+    https_uri = parameters[NN_HTTPS_ADDRESS_KEY]
+
+  if NN_HTTP_POLICY_KEY in parameters:
+    http_policy = parameters[NN_HTTP_POLICY_KEY]
+
+  if NN_CHECKPOINT_TX_KEY in parameters:
+    checkpoint_tx = parameters[NN_CHECKPOINT_TX_KEY]
+
+  if NN_CHECKPOINT_PERIOD_KEY in parameters:
+    checkpoint_period = parameters[NN_CHECKPOINT_PERIOD_KEY]
+    
+  # determine the right URI and whether to use SSL
+  uri = http_uri
+  if http_policy == 'HTTPS_ONLY':
+    scheme = 'https'
+    
+    if https_uri is not None:
+      uri = https_uri 
+  
+  current_time = int(round(time.time() * 1000))
+
+  last_checkpoint_time_qry = "{0}://{1}/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem".format(scheme,uri)
+  journal_transaction_info_qry = "{0}://{1}/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo".format(scheme,uri)
+
+  # start out assuming an OK status
+  label = None
+  result_code = "OK"
+
+  try:
+    last_checkpoint_time = int(get_value_from_jmx(last_checkpoint_time_qry,"LastCheckpointTime"))
+    journal_transaction_info = get_value_from_jmx(journal_transaction_info_qry,"JournalTransactionInfo")
+    journal_transaction_info_dict = json.loads(journal_transaction_info)
+  
+    last_tx = int(journal_transaction_info_dict['LastAppliedOrWrittenTxId'])
+    most_recent_tx = int(journal_transaction_info_dict['MostRecentCheckpointTxId'])
+    transaction_difference = last_tx - most_recent_tx
+    
+    delta = (current_time - last_checkpoint_time)/1000
+
+    label = LABEL.format(h=get_time(delta)['h'], m=get_time(delta)['m'], tx=transaction_difference)
+    
+    if (transaction_difference > int(checkpoint_tx)) and (float(delta) / int(checkpoint_period)*100 >= int(percent_critical)):
+      result_code = 'CRITICAL'
+    elif (transaction_difference > int(checkpoint_tx)) and (float(delta) / int(checkpoint_period)*100 >= int(percent_warning)):
+      result_code = 'WARNING'
+
+  except Exception, e:
+    label = str(e)
+    result_code = 'UNKNOWN'
+        
+  return (result_code, [label])
+
+def get_time(delta):
+  h = int(delta/3600)
+  m = int((delta % 3600)/60)
+  return {'h':h, 'm':m}
+
+
+def get_value_from_jmx(qry, property):
+  response = urllib2.urlopen(qry)
+  data = response.read()
+  data_dict = json.loads(data)
+  return data_dict["beans"][0][property]

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/files/alert_ha_namenode_health.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/files/alert_ha_namenode_health.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/files/alert_ha_namenode_health.py
new file mode 100644
index 0000000..fc1541d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/files/alert_ha_namenode_health.py
@@ -0,0 +1,166 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import urllib2
+import json
+
+RESULT_STATE_OK = 'OK'
+RESULT_STATE_CRITICAL = 'CRITICAL'
+RESULT_STATE_UNKNOWN = 'UNKNOWN'
+RESULT_STATE_SKIPPED = 'SKIPPED'
+
+HDFS_NN_STATE_ACTIVE = 'active'
+HDFS_NN_STATE_STANDBY = 'standby'
+
+HDFS_SITE_KEY = '{{hdfs-site}}'
+NAMESERVICE_KEY = '{{hdfs-site/dfs.nameservices}}'
+NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
+NN_HTTPS_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.https-address}}'
+DFS_POLICY_KEY = '{{hdfs-site/dfs.http.policy}}'
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (HDFS_SITE_KEY, NAMESERVICE_KEY, NN_HTTP_ADDRESS_KEY,
+  NN_HTTPS_ADDRESS_KEY, DFS_POLICY_KEY)
+  
+
+def execute(parameters=None, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  parameters (dictionary): a mapping of parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+  if parameters is None:
+    return (RESULT_STATE_UNKNOWN, ['There were no parameters supplied to the script.'])
+
+  # if not in HA mode, then SKIP
+  if NAMESERVICE_KEY not in parameters:
+    return (RESULT_STATE_SKIPPED, ['NameNode HA is not enabled'])
+
+  # hdfs-site is required
+  if HDFS_SITE_KEY not in parameters:
+    return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(HDFS_SITE_KEY)])
+
+  # determine whether or not SSL is enabled
+  is_ssl_enabled = False
+  if DFS_POLICY_KEY in parameters:
+    dfs_policy = parameters[DFS_POLICY_KEY]
+    if dfs_policy == "HTTPS_ONLY":
+      is_ssl_enabled = True
+
+  name_service = parameters[NAMESERVICE_KEY]
+  hdfs_site = parameters[HDFS_SITE_KEY]
+
+  # look for dfs.ha.namenodes.foo
+  nn_unique_ids_key = 'dfs.ha.namenodes.' + name_service
+  if nn_unique_ids_key not in hdfs_site:
+    return (RESULT_STATE_UNKNOWN, ['Unable to find unique namenode alias key {0}'.format(nn_unique_ids_key)])
+
+  namenode_http_fragment = 'dfs.namenode.http-address.{0}.{1}'
+  jmx_uri_fragment = "http://{0}/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus"
+
+  if is_ssl_enabled:
+    namenode_http_fragment = 'dfs.namenode.https-address.{0}.{1}'
+    jmx_uri_fragment = "https://{0}/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus"
+
+
+  active_namenodes = []
+  standby_namenodes = []
+  unknown_namenodes = []
+
+  # now we have something like 'nn1,nn2,nn3,nn4'
+  # turn it into dfs.namenode.[property].[dfs.nameservices].[nn_unique_id]
+  # ie dfs.namenode.http-address.hacluster.nn1
+  nn_unique_ids = hdfs_site[nn_unique_ids_key].split(',')
+  for nn_unique_id in nn_unique_ids:
+    key = namenode_http_fragment.format(name_service,nn_unique_id)
+
+    if key in hdfs_site:
+      # use str() to ensure that unicode strings do not have the u' in them
+      value = str(hdfs_site[key])
+
+      try:
+        jmx_uri = jmx_uri_fragment.format(value)
+        state = get_value_from_jmx(jmx_uri,'State')
+
+        if state == HDFS_NN_STATE_ACTIVE:
+          active_namenodes.append(value)
+        elif state == HDFS_NN_STATE_STANDBY:
+          standby_namenodes.append(value)
+        else:
+          unknown_namenodes.append(value)
+      except:
+        unknown_namenodes.append(value)
+
+  # now that the request is done, determine if this host is the host that
+  # should report the status of the HA topology
+  is_active_namenode = False
+  for active_namenode in active_namenodes:
+    if active_namenode.startswith(host_name):
+      is_active_namenode = True
+
+  # there's only one scenario here; there is exactly 1 active and 1 standby
+  is_topology_healthy = len(active_namenodes) == 1 and len(standby_namenodes) == 1
+
+  result_label = 'Active{0}, Standby{1}, Unknown{2}'.format(str(active_namenodes),
+    str(standby_namenodes), str(unknown_namenodes))
+
+  # Healthy Topology:
+  #   - Active NN reports the alert, standby does not
+  #
+  # Unhealthy Topology:
+  #   - Report the alert if this is the first named host
+  #   - Report the alert if not the first named host, but the other host
+  #   could not report its status
+  if is_topology_healthy:
+    if is_active_namenode is True:
+      return (RESULT_STATE_OK, [result_label])
+    else:
+      return (RESULT_STATE_SKIPPED, ['Another host will report this alert'])
+  else:
+    # dfs.namenode.rpc-address.service.alias is guaranteed in HA mode
+    first_listed_host_key = 'dfs.namenode.rpc-address.{0}.{1}'.format(
+      name_service, nn_unique_ids[0])
+
+    first_listed_host = ''
+    if first_listed_host_key in hdfs_site:
+      first_listed_host = hdfs_site[first_listed_host_key]
+
+    is_first_listed_host = False
+    if first_listed_host.startswith(host_name):
+      is_first_listed_host = True
+
+    if is_first_listed_host:
+      return (RESULT_STATE_CRITICAL, [result_label])
+    else:
+      # not the first listed host, but the first host might be in the unknown
+      return (RESULT_STATE_SKIPPED, ['Another host will report this alert'])
+
+
+def get_value_from_jmx(qry, property):
+  response = urllib2.urlopen(qry)
+  data = response.read()
+  data_dict = json.loads(data)
+  return data_dict["beans"][0][property]

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/files/checkForFormat.sh b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/files/checkForFormat.sh
new file mode 100644
index 0000000..54405f6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/files/checkForFormat.sh
@@ -0,0 +1,71 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export hdfs_user=$1
+shift
+export conf_dir=$1
+shift
+export bin_dir=$1
+shift
+export old_mark_dir=$1
+shift
+export mark_dir=$1
+shift
+export name_dirs=$*
+
+export EXIT_CODE=0
+export command="namenode -format"
+export list_of_non_empty_dirs=""
+
+mark_file=/var/run/hadoop/hdfs/namenode-formatted
+if [[ -f ${mark_file} ]] ; then
+  sudo rm -f ${mark_file}
+  sudo mkdir -p ${mark_dir}
+fi
+
+if [[ -d $old_mark_dir ]] ; then
+  mv ${old_mark_dir} ${mark_dir}
+fi
+
+if [[ ! -d $mark_dir ]] ; then
+  for dir in `echo $name_dirs | tr ',' ' '` ; do
+    echo "NameNode Dirname = $dir"
+    cmd="ls $dir | wc -l  | grep -q ^0$"
+    eval $cmd
+    if [[ $? -ne 0 ]] ; then
+      (( EXIT_CODE = $EXIT_CODE + 1 ))
+      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
+    fi
+  done
+
+  if [[ $EXIT_CODE == 0 ]] ; then
+    sudo su ${hdfs_user} - -s /bin/bash -c "export PATH=$PATH:${bin_dir} ; yes Y | hdfs --config ${conf_dir} ${command}"
+    (( EXIT_CODE = $EXIT_CODE | $? ))
+  else
+    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
+  fi
+else
+  echo "${mark_dir} exists. Namenode DFS already formatted"
+fi
+
+exit $EXIT_CODE
+
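
The positional arguments this script consumes are easy to get wrong, so here is a hedged example of a caller, written in Python since that is how Ambari service scripts shell out; every path below is an illustrative assumption, not a value from this patch:

    # Illustrative caller for checkForFormat.sh; argument order matches the
    # shift-parsing above: user, conf dir, bin dir, old mark dir, mark dir,
    # then the comma-separated NameNode data dirs.
    import subprocess
    rc = subprocess.call([
      '/var/lib/ambari-agent/cache/checkForFormat.sh',  # assumed location
      'hdfs',                                      # hdfs_user
      '/etc/hadoop/conf',                          # conf_dir
      '/usr/bin',                                  # bin_dir
      '/var/run/hadoop/hdfs/namenode-formatted',   # old_mark_dir
      '/var/lib/hdfs/namenode/formatted',          # mark_dir
      '/hadoop/hdfs/namenode',                     # name_dirs
    ])
    # rc == 0: formatted (or already formatted); rc != 0: non-empty dirs found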

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/files/checkWebUI.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/files/checkWebUI.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/files/checkWebUI.py
new file mode 100644
index 0000000..f8e9c1a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/files/checkWebUI.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import optparse
+import httplib
+
+#
+# Main.
+#
+def main():
+  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
+  parser.add_option("-m", "--hosts", dest="hosts", help="Comma separated hosts list for WEB UI to check it availability")
+  parser.add_option("-p", "--port", dest="port", help="Port of WEB UI to check it availability")
+
+  (options, args) = parser.parse_args()
+  
+  hosts = options.hosts.split(',')
+  port = options.port
+
+  for host in hosts:
+    try:
+      conn = httplib.HTTPConnection(host, port)
+      # This can be modified to get a partial url part to be sent with request
+      conn.request("GET", "/")
+      httpCode = conn.getresponse().status
+      conn.close()
+    except Exception:
+      httpCode = 404
+
+    if httpCode != 200:
+      print "Cannot access WEB UI on: http://" + host + ":" + port
+      exit(1)
+      
+
+if __name__ == "__main__":
+  main()
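
A hedged usage example for the checker above; the host names and port are invented for illustration:

    # Illustrative invocation of checkWebUI.py (Python 2, as the script
    # uses httplib and print statements). Exits non-zero on the first
    # web UI that does not answer with HTTP 200.
    import subprocess
    rc = subprocess.call(['python', 'checkWebUI.py',
                          '-m', 'nn1.example.com,snn1.example.com',
                          '-p', '50070'])
    # rc == 1 means some host's web UI was unreachable or unhealthy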

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/__init__.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/__init__.py
new file mode 100644
index 0000000..35de4bb
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/__init__.py
@@ -0,0 +1,20 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""


[23/37] ambari git commit: AMBARI-8745: Common Services: Refactor HDP2.0.6 FLUME, GANGLIA, HBASE services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/checkGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/checkGmetad.sh b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/checkGmetad.sh
new file mode 100644
index 0000000..f45e371
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/checkGmetad.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./gmetadLib.sh;
+
+# Before checking gmetad, check rrdcached.
+./checkRrdcached.sh;
+
+gmetadRunningPid=`getGmetadRunningPid`;
+
+if [ -n "${gmetadRunningPid}" ]
+then
+  echo "${GMETAD_BIN} running with PID ${gmetadRunningPid}";
+else
+  echo "Failed to find running ${GMETAD_BIN}";
+  exit 1;
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/checkGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/checkGmond.sh b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/checkGmond.sh
new file mode 100644
index 0000000..fbf524a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/checkGmond.sh
@@ -0,0 +1,62 @@
+#!/usr/bin/env bash
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
+source ./gmondLib.sh;
+
+function checkGmondForCluster()
+{
+    gmondClusterName=${1};
+
+    gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
+
+    # Skip over (purported) Clusters that don't have their core conf file present.
+    if [ -e "${gmondCoreConfFileName}" ]
+    then 
+      gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+
+      if [ -n "${gmondRunningPid}" ]
+      then
+        echo "${GMOND_BIN} for cluster ${gmondClusterName} running with PID ${gmondRunningPid}";
+      else
+        echo "Failed to find running ${GMOND_BIN} for cluster ${gmondClusterName}";
+        exit 1;
+      fi
+    fi
+}
+
+# main()
+gmondClusterName=${1};
+
+if [ "x" == "x${gmondClusterName}" ]
+then
+    # No ${gmondClusterName} passed in as command-line arg, so check
+    # all the gmonds we know about.
+    for gmondClusterName in `getConfiguredGangliaClusterNames`
+    do
+        checkGmondForCluster ${gmondClusterName};
+    done
+else
+    # Just check the one ${gmondClusterName} that was asked for.
+    checkGmondForCluster ${gmondClusterName};
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/checkRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/checkRrdcached.sh b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/checkRrdcached.sh
new file mode 100644
index 0000000..1e0c2e2
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/checkRrdcached.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+if [ -n "${rrdcachedRunningPid}" ]
+then
+  echo "${RRDCACHED_BIN} running with PID ${rrdcachedRunningPid}";
+else
+  echo "Failed to find running ${RRDCACHED_BIN}";
+  exit 1;
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/gmetad.init
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/gmetad.init b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/gmetad.init
new file mode 100644
index 0000000..20b388e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/gmetad.init
@@ -0,0 +1,73 @@
+#!/bin/sh
+# chkconfig: 2345 70 40
+# description: hdp-gmetad startup script
+# processname: hdp-gmetad
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Remember to keep this in sync with the definition of
+# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
+HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
+HDP_GANLIA_GMETAD_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmetad.sh
+HDP_GANLIA_GMETAD_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmetad.sh
+HDP_GANLIA_GMETAD_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmetad.sh
+
+RETVAL=0
+
+case "$1" in
+   start)
+      echo "============================="
+      echo "Starting hdp-gmetad..."
+      echo "============================="
+      [ -f ${HDP_GANLIA_GMETAD_STARTER} ] || exit 1
+      eval "${HDP_GANLIA_GMETAD_STARTER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmetad
+      ;;
+
+  stop)
+      echo "=================================="
+      echo "Shutting down hdp-gmetad..."
+      echo "=================================="
+      [ -f ${HDP_GANLIA_GMETAD_STOPPER} ] || exit 1
+      eval "${HDP_GANLIA_GMETAD_STOPPER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmetad
+      ;;
+
+  restart|reload)
+   	$0 stop
+   	$0 start
+   	RETVAL=$?
+	;;
+  status)
+      echo "======================================="
+      echo "Checking status of hdp-gmetad..."
+      echo "======================================="
+      [ -f ${HDP_GANLIA_GMETAD_CHECKER} ] || exit 1
+      eval "${HDP_GANLIA_GMETAD_CHECKER}"
+      RETVAL=$?
+      ;;
+  *)
+	echo "Usage: $0 {start|stop|restart|status}"
+	exit 1
+esac
+
+exit $RETVAL
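
The chkconfig header above ("2345 70 40") registers the service for
runlevels 2 through 5 with start priority 70 and stop priority 40. On a
RHEL-style host, registration and day-to-day use would look roughly like
this (illustrative only):

  chkconfig --add hdp-gmetad    # picks up the '# chkconfig: 2345 70 40' header
  service hdp-gmetad start      # delegates to startGmetad.sh
  service hdp-gmetad status     # delegates to checkGmetad.sh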

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/gmetadLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/gmetadLib.sh b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/gmetadLib.sh
new file mode 100644
index 0000000..6a24bed
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/gmetadLib.sh
@@ -0,0 +1,204 @@
+#!/usr/bin/env bash
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get access to Ganglia-wide constants etc.
+source ./gangliaLib.sh;
+
+GMETAD_BIN=/usr/sbin/gmetad;
+GMETAD_CONF_FILE=${GANGLIA_CONF_DIR}/gmetad.conf;
+GMETAD_PID_FILE=${GANGLIA_RUNTIME_DIR}/gmetad.pid;
+
+function getGmetadLoggedPid()
+{
+    if [ -e "${GMETAD_PID_FILE}" ]
+    then
+        echo `cat ${GMETAD_PID_FILE}`;
+    fi
+}
+
+function getGmetadRunningPid()
+{
+    gmetadLoggedPid=`getGmetadLoggedPid`;
+
+    if [ -n "${gmetadLoggedPid}" ]
+    then
+        echo `ps -o pid=MYPID -p ${gmetadLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
+    fi
+}
+
+function generateGmetadConf()
+{
+    now=`date`;
+
+    cat <<END_OF_GMETAD_CONF_1
+#################### Generated by ${0} on ${now} ####################
+#
+#-------------------------------------------------------------------------------
+# Setting the debug_level to 1 will keep the daemon in the foreground and
+# show only error messages. Setting this value higher than 1 will make 
+# gmetad output debugging information and stay in the foreground.
+# default: 0
+# debug_level 10
+#
+#-------------------------------------------------------------------------------
+# What to monitor. The most important section of this file. 
+#
+# The data_source tag specifies either a cluster or a grid to
+# monitor. If we detect the source is a cluster, we will maintain a complete
+# set of RRD databases for it, which can be used to create historical 
+# graphs of the metrics. If the source is a grid (it comes from another gmetad),
+# we will only maintain summary RRDs for it.
+#
+# Format: 
+# data_source "my cluster" [polling interval] address1:port addreses2:port ...
+# 
+# The keyword 'data_source' must immediately be followed by a unique
+# string which identifies the source, then an optional polling interval in 
+# seconds. The source will be polled at this interval on average. 
+# If the polling interval is omitted, 15sec is assumed.
+#
+# If you choose to set the polling interval to something other than the default,
+# note that the web frontend determines a host as down if its TN value exceeds
+# 4 * TMAX (TMAX is 20sec by default).  Therefore, if you set the polling interval
+# to something around or greater than 80sec, this will cause the frontend to
+# incorrectly display hosts as down even though they are not.
+#
+# A list of machines which service the data source follows, in the 
+# format ip:port, or name:port. If a port is not specified then 8649
+# (the default gmond port) is assumed.
+# default: There is no default value
+#
+# data_source "my cluster" 10 localhost  my.machine.edu:8649  1.2.3.5:8655
+# data_source "my grid" 50 1.3.4.7:8655 grid.org:8651 grid-backup.org:8651
+# data_source "another source" 1.3.4.7:8655  1.3.4.8
+END_OF_GMETAD_CONF_1
+
+    # Get info about all the configured Ganglia clusters.
+    getGangliaClusterInfo | while read gangliaClusterInfoLine
+    do
+        # From each, parse out ${gmondClusterName}, ${gmondMasterIP} and ${gmondPort}... 
+        read gmondClusterName gmondMasterIP gmondPort <<<`echo ${gangliaClusterInfoLine}`;
+        # ...and generate a corresponding data_source line for gmetad.conf. 
+        echo "data_source \"${gmondClusterName}\" ${gmondMasterIP}:${gmondPort}";
+    done
+
+    cat <<END_OF_GMETAD_CONF_2
+#
+# Round-Robin Archives
+# You can specify custom Round-Robin archives here (defaults are listed below)
+#
+# Old Default RRA: Keep 1 hour of metrics at 15 second resolution, 1 day at 6 minute resolution.
+# RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
+#      "RRA:AVERAGE:0.5:5760:374"
+# New Default RRA
+# Keep 5856 data points at 15 second resolution assuming 15 second (default) polling. That's 1 day
+# Two weeks of data points at 1 minute resolution (average)
+#RRAs "RRA:AVERAGE:0.5:1:5856" "RRA:AVERAGE:0.5:4:20160" "RRA:AVERAGE:0.5:40:52704"
+# Retaining existing resolution
+RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
+     "RRA:AVERAGE:0.5:5760:374"
+#
+#-------------------------------------------------------------------------------
+# Scalability mode. If on, we summarize over downstream grids, and respect
+# authority tags. If off, we take on 2.5.0-era behavior: we do not wrap our output
+# in <GRID></GRID> tags, we ignore all <GRID> tags we see, and always assume
+# we are the "authority" on data source feeds. This approach does not scale to
+# large groups of clusters, but is provided for backwards compatibility.
+# default: on
+# scalable off
+#
+#-------------------------------------------------------------------------------
+# The name of this Grid. All the data sources above will be wrapped in a GRID
+# tag with this name.
+# default: unspecified
+gridname "HDP_GRID"
+#
+#-------------------------------------------------------------------------------
+# The authority URL for this grid. Used by other gmetads to locate graphs
+# for our data sources. Generally points to a ganglia/
+# website on this machine.
+# default: "http://hostname/ganglia/",
+#   where hostname is the name of this machine, as defined by gethostname().
+# authority "http://mycluster.org/newprefix/"
+#
+#-------------------------------------------------------------------------------
+# List of machines this gmetad will share XML with. Localhost
+# is always trusted. 
+# default: There is no default value
+# trusted_hosts 127.0.0.1 169.229.50.165 my.gmetad.org
+#
+#-------------------------------------------------------------------------------
+# If you want any host which connects to the gmetad XML to receive
+# data, then set this value to "on"
+# default: off
+# all_trusted on
+#
+#-------------------------------------------------------------------------------
+# If you don't want gmetad to setuid then set this to off
+# default: on
+# setuid off
+#
+#-------------------------------------------------------------------------------
+# User gmetad will setuid to (defaults to "nobody")
+# default: "nobody"
+setuid_username "${GMETAD_USER}"
+#
+#-------------------------------------------------------------------------------
+# Umask to apply to created rrd files and grid directory structure
+# default: 0 (files are public)
+# umask 022
+#
+#-------------------------------------------------------------------------------
+# The port gmetad will answer requests for XML
+# default: 8651
+# xml_port 8651
+#
+#-------------------------------------------------------------------------------
+# The port gmetad will answer queries for XML. This facility allows
+# simple subtree and summation views of the XML tree.
+# default: 8652
+# interactive_port 8652
+#
+#-------------------------------------------------------------------------------
+# The number of threads answering XML requests
+# default: 4
+# server_threads 10
+#
+#-------------------------------------------------------------------------------
+# Where gmetad stores its round-robin databases
+# default: "/var/lib/ganglia/rrds"
+rrd_rootdir "${RRD_ROOTDIR}"
+#
+#-------------------------------------------------------------------------------
+# In earlier versions of gmetad, hostnames were handled in a case
+# sensitive manner
+# If your hostname directories have been renamed to lower case,
+# set this option to 0 to disable backward compatibility.
+# From version 3.2, backwards compatibility will be disabled by default.
+# default: 1   (for gmetad < 3.2)
+# default: 0   (for gmetad >= 3.2)
+case_sensitive_hostnames 1
+END_OF_GMETAD_CONF_2
+}
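
generateGmetadConf emits one data_source line per "name ip port" triple
returned by getGangliaClusterInfo (defined in gangliaLib.sh, outside this
hunk). Assuming a hypothetical cluster entry of HDPSlaves on
host1.example.com:8660, the generated line would read:

  data_source "HDPSlaves" host1.example.com:8660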

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/gmond.init
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/gmond.init b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/gmond.init
new file mode 100644
index 0000000..afb7026
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/gmond.init
@@ -0,0 +1,73 @@
+#!/bin/sh
+# chkconfig: 2345 70 40
+# description: hdp-gmond startup script
+# processname: hdp-gmond
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Remember to keep this in sync with the definition of
+# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
+HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
+HDP_GANLIA_GMOND_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmond.sh
+HDP_GANLIA_GMOND_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmond.sh
+HDP_GANLIA_GMOND_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmond.sh
+
+RETVAL=0
+
+case "$1" in
+   start)
+      echo "============================="
+      echo "Starting hdp-gmond..."
+      echo "============================="
+      [ -f ${HDP_GANLIA_GMOND_STARTER} ] || exit 1
+      eval "${HDP_GANLIA_GMOND_STARTER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmond
+      ;;
+
+  stop)
+      echo "=================================="
+      echo "Shutting down hdp-gmond..."
+      echo "=================================="
+      [ -f ${HDP_GANLIA_GMOND_STOPPER} ] || exit 1
+      eval "${HDP_GANLIA_GMOND_STOPPER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmond
+      ;;
+
+  restart|reload)
+   	$0 stop
+   	$0 start
+   	RETVAL=$?
+	;;
+  status)
+      echo "======================================="
+      echo "Checking status of hdp-gmond..."
+      echo "======================================="
+      [ -f ${HDP_GANLIA_GMOND_CHECKER} ] || exit 1
+      eval "${HDP_GANLIA_GMOND_CHECKER}"
+      RETVAL=$?
+      ;;
+  *)
+	echo "Usage: $0 {start|stop|restart|status}"
+	exit 1
+esac
+
+exit $RETVAL

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/gmondLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/gmondLib.sh b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/gmondLib.sh
new file mode 100644
index 0000000..d06afd8
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/gmondLib.sh
@@ -0,0 +1,538 @@
+#!/usr/bin/env bash
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get access to Ganglia-wide constants etc.
+source ./gangliaLib.sh;
+
+GMOND_BIN=/usr/sbin/gmond;
+GMOND_CORE_CONF_FILE=gmond.core.conf;
+GMOND_MASTER_CONF_FILE=gmond.master.conf;
+GMOND_SLAVE_CONF_FILE=gmond.slave.conf;
+GMOND_PID_FILE=gmond.pid;
+
+# Functions.
+function getGmondCoreConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/${GMOND_CORE_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/${GMOND_CORE_CONF_FILE}";
+    fi
+}
+
+function getGmondMasterConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_MASTER_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_MASTER_CONF_FILE}";
+    fi
+}
+
+function getGmondSlaveConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_SLAVE_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_SLAVE_CONF_FILE}";
+    fi
+}
+
+function getGmondPidFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_RUNTIME_DIR}/${clusterName}/${GMOND_PID_FILE}";
+    else
+        echo "${GANGLIA_RUNTIME_DIR}/${GMOND_PID_FILE}";
+    fi
+}
+
+function getGmondLoggedPid()
+{
+    gmondPidFile=`getGmondPidFileName ${1}`;
+
+    if [ -e "${gmondPidFile}" ]
+    then
+        echo `cat ${gmondPidFile}`;
+    fi
+}
+
+function getGmondRunningPid()
+{
+    gmondLoggedPid=`getGmondLoggedPid ${1}`;
+
+    if [ -n "${gmondLoggedPid}" ]
+    then
+        echo `ps -o pid=MYPID -p ${gmondLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
+    fi
+}
+
+function generateGmondCoreConf()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_CORE_CONF
+#################### Generated by ${0} on ${now} ####################
+#
+/* This configuration is as close to 2.5.x default behavior as possible
+   The values closely match ./gmond/metric.h definitions in 2.5.x */
+globals {
+  daemonize = yes
+  setuid = yes
+  user = ${GMOND_USER}
+  debug_level = 0
+  max_udp_msg_len = 1472
+  mute = no
+  deaf = no 
+  allow_extra_data = yes
+  host_dmax = 0 /*secs */
+  host_tmax = 20 /*secs */
+  cleanup_threshold = 300 /*secs */
+  gexec = no
+  send_metadata_interval = 30 /*secs */
+}
+
+/*
+ * The cluster attributes specified will be used as part of the <CLUSTER>
+ * tag that will wrap all hosts collected by this instance.
+ */
+cluster {
+  name = "${gmondClusterName}"
+  owner = "unspecified"
+  latlong = "unspecified"
+  url = "unspecified"
+}
+
+/* The host section describes attributes of the host, like the location */
+host {
+  location = "unspecified"
+}
+
+/* You can specify as many tcp_accept_channels as you like to share
+ * an XML description of the state of the cluster.
+ *
+ * At the very least, every gmond must expose its XML state to 
+ * queriers from localhost.
+ */
+tcp_accept_channel {
+  bind = 0.0.0.0
+  port = ${gmondPort}
+}
+
+/* Each metrics module that is referenced by gmond must be specified and
+   loaded. If the module has been statically linked with gmond, it does
+   not require a load path. However all dynamically loadable modules must
+   include a load path. */
+modules {
+  module {
+    name = "core_metrics"
+  }
+  module {
+    name = "cpu_module"
+    path = "${MODULES_DIR}/modcpu.so"
+  }
+  module {
+    name = "disk_module"
+    path = "${MODULES_DIR}/moddisk.so"
+  }
+  module {
+    name = "load_module"
+    path = "${MODULES_DIR}/modload.so"
+  }
+  module {
+    name = "mem_module"
+    path = "${MODULES_DIR}/modmem.so"
+  }
+  module {
+    name = "net_module"
+    path = "${MODULES_DIR}/modnet.so"
+  }
+  module {
+    name = "proc_module"
+    path = "${MODULES_DIR}/modproc.so"
+  }
+  module {
+    name = "sys_module"
+    path = "${MODULES_DIR}/modsys.so"
+  }
+}
+
+/* The old internal 2.5.x metric array has been replaced by the following
+   collection_group directives.  What follows is the default behavior for
+   collecting and sending metrics that is as close to 2.5.x behavior as
+   possible. */
+
+/* This collection group will cause a heartbeat (or beacon) to be sent every
+   20 seconds.  In the heartbeat is the GMOND_STARTED data which expresses
+   the age of the running gmond. */
+collection_group {
+  collect_once = yes
+  time_threshold = 20
+  metric {
+    name = "heartbeat"
+  }
+}
+
+/* This collection group will send general info about this host's total memory every
+   180 secs.
+   This information doesn't change between reboots and is only collected
+   once. This information is needed for heatmap display. */
+ collection_group {
+   collect_once = yes
+   time_threshold = 180
+   metric {
+    name = "mem_total"
+    title = "Memory Total"
+   }
+ }
+
+/* This collection group will send general info about this host every
+   1200 secs.
+   This information doesn't change between reboots and is only collected
+   once. */
+collection_group {
+  collect_once = yes
+  time_threshold = 1200
+  metric {
+    name = "cpu_num"
+    title = "CPU Count"
+  }
+  metric {
+    name = "cpu_speed"
+    title = "CPU Speed"
+  }
+  /* Should this be here? Swap can be added/removed between reboots. */
+  metric {
+    name = "swap_total"
+    title = "Swap Space Total"
+  }
+  metric {
+    name = "boottime"
+    title = "Last Boot Time"
+  }
+  metric {
+    name = "machine_type"
+    title = "Machine Type"
+  }
+  metric {
+    name = "os_name"
+    title = "Operating System"
+  }
+  metric {
+    name = "os_release"
+    title = "Operating System Release"
+  }
+  metric {
+    name = "location"
+    title = "Location"
+  }
+}
+
+/* This collection group will send the status of gexecd for this host
+   every 300 secs.*/
+/* Unlike 2.5.x the default behavior is to report gexecd OFF. */
+collection_group {
+  collect_once = yes
+  time_threshold = 300
+  metric {
+    name = "gexec"
+    title = "Gexec Status"
+  }
+}
+
+/* This collection group will collect the CPU status info every 20 secs.
+   The time threshold is set to 90 seconds.  In honesty, this
+   time_threshold could be set significantly higher to reduce
+   unnecessary network chatter. */
+collection_group {
+  collect_every = 20
+  time_threshold = 90
+  /* CPU status */
+  metric {
+    name = "cpu_user"
+    value_threshold = "1.0"
+    title = "CPU User"
+  }
+  metric {
+    name = "cpu_system"
+    value_threshold = "1.0"
+    title = "CPU System"
+  }
+  metric {
+    name = "cpu_idle"
+    value_threshold = "5.0"
+    title = "CPU Idle"
+  }
+  metric {
+    name = "cpu_nice"
+    value_threshold = "1.0"
+    title = "CPU Nice"
+  }
+  metric {
+    name = "cpu_aidle"
+    value_threshold = "5.0"
+    title = "CPU aidle"
+  }
+  metric {
+    name = "cpu_wio"
+    value_threshold = "1.0"
+    title = "CPU wio"
+  }
+  /* The next two metrics are optional if you want more detail...
+     ... since they are accounted for in cpu_system.
+  metric {
+    name = "cpu_intr"
+    value_threshold = "1.0"
+    title = "CPU intr"
+  }
+  metric {
+    name = "cpu_sintr"
+    value_threshold = "1.0"
+    title = "CPU sintr"
+  }
+  */
+}
+
+collection_group {
+  collect_every = 20
+  time_threshold = 90
+  /* Load Averages */
+  metric {
+    name = "load_one"
+    value_threshold = "1.0"
+    title = "One Minute Load Average"
+  }
+  metric {
+    name = "load_five"
+    value_threshold = "1.0"
+    title = "Five Minute Load Average"
+  }
+  metric {
+    name = "load_fifteen"
+    value_threshold = "1.0"
+    title = "Fifteen Minute Load Average"
+  }
+}
+
+/* This group collects the number of running and total processes */
+collection_group {
+  collect_every = 80
+  time_threshold = 950
+  metric {
+    name = "proc_run"
+    value_threshold = "1.0"
+    title = "Total Running Processes"
+  }
+  metric {
+    name = "proc_total"
+    value_threshold = "1.0"
+    title = "Total Processes"
+  }
+}
+
+/* This collection group grabs the volatile memory metrics every 40 secs and
+   sends them at least every 180 secs.  This time_threshold can be increased
+   significantly to reduce unneeded network traffic. */
+collection_group {
+  collect_every = 40
+  time_threshold = 180
+  metric {
+    name = "mem_free"
+    value_threshold = "1024.0"
+    title = "Free Memory"
+  }
+  metric {
+    name = "mem_shared"
+    value_threshold = "1024.0"
+    title = "Shared Memory"
+  }
+  metric {
+    name = "mem_buffers"
+    value_threshold = "1024.0"
+    title = "Memory Buffers"
+  }
+  metric {
+    name = "mem_cached"
+    value_threshold = "1024.0"
+    title = "Cached Memory"
+  }
+  metric {
+    name = "swap_free"
+    value_threshold = "1024.0"
+    title = "Free Swap Space"
+  }
+}
+
+collection_group {
+  collect_every = 40
+  time_threshold = 300
+  metric {
+    name = "bytes_out"
+    value_threshold = 4096
+    title = "Bytes Sent"
+  }
+  metric {
+    name = "bytes_in"
+    value_threshold = 4096
+    title = "Bytes Received"
+  }
+  metric {
+    name = "pkts_in"
+    value_threshold = 256
+    title = "Packets Received"
+  }
+  metric {
+    name = "pkts_out"
+    value_threshold = 256
+    title = "Packets Sent"
+  }
+}
+
+
+collection_group {
+  collect_every = 40
+  time_threshold = 180
+  metric {
+    name = "disk_free"
+    value_threshold = 1.0
+    title = "Disk Space Available"
+  }
+  metric {
+    name = "part_max_used"
+    value_threshold = 1.0
+    title = "Maximum Disk Space Used"
+  }
+  metric {
+    name = "disk_total"
+    value_threshold = 1.0
+    title = "Total Disk Space"
+  }
+}
+
+udp_recv_channel {
+    port = 0
+}
+
+
+include ("${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d/*.conf")
+END_OF_GMOND_CORE_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}
+
+function generateGmondMasterConf
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_MASTER_CONF
+#################### Generated by ${0} on ${now} ####################
+/* Masters only receive; they never send. */
+udp_recv_channel {
+  bind = ${gmondMasterIP}
+  port = ${gmondPort}
+}
+
+END_OF_GMOND_MASTER_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}
+
+function generateGmondSlaveConf
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_SLAVE_CONF
+#################### Generated by ${0} on ${now} ####################
+/* Slaves only send; they never receive. */
+udp_send_channel {
+  #bind_hostname = yes # Highly recommended, soon to be default.
+                       # This option tells gmond to use a source address
+                       # that resolves to the machine's hostname.  Without
+                       # this, the metrics may appear to come from any
+                       # interface and the DNS names associated with
+                       # those IPs will be used to create the RRDs.
+  host = ${gmondMasterIP}
+  port = ${gmondPort}
+  ttl = 1
+}
+END_OF_GMOND_SLAVE_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}
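
The getGmond*FileName helpers above imply a per-cluster layout under
${GANGLIA_CONF_DIR}; the trailing include() in the core config then pulls
in whichever conf.d file was generated. For a hypothetical cluster named
HDPSlaves the tree would be:

  ${GANGLIA_CONF_DIR}/HDPSlaves/gmond.core.conf            # every gmond host
  ${GANGLIA_CONF_DIR}/HDPSlaves/conf.d/gmond.master.conf   # receive-only master
  ${GANGLIA_CONF_DIR}/HDPSlaves/conf.d/gmond.slave.conf    # send-only slave
  ${GANGLIA_RUNTIME_DIR}/HDPSlaves/gmond.pid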

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/rrdcachedLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/rrdcachedLib.sh b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/rrdcachedLib.sh
new file mode 100644
index 0000000..a070fca
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/rrdcachedLib.sh
@@ -0,0 +1,47 @@
+#!/usr/bin/env bash
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get access to Ganglia-wide constants etc.
+source ./gangliaLib.sh;
+
+RRDCACHED_BIN=/usr/bin/rrdcached;
+RRDCACHED_PID_FILE=${GANGLIA_RUNTIME_DIR}/rrdcached.pid;
+RRDCACHED_ALL_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.sock;
+RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.limited.sock;
+
+function getRrdcachedLoggedPid()
+{
+    if [ -e "${RRDCACHED_PID_FILE}" ]
+    then
+        echo `cat ${RRDCACHED_PID_FILE}`;
+    fi
+}
+
+function getRrdcachedRunningPid()
+{
+    rrdcachedLoggedPid=`getRrdcachedLoggedPid`;
+
+    if [ -n "${rrdcachedLoggedPid}" ]
+    then
+        echo `ps -o pid=MYPID -p ${rrdcachedLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
+    fi
+}
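
startRrdcached.sh (below) binds two sockets: the all-access one for gmetad,
and this limited one, restricted to FLUSH, STATS and HELP, for the web
frontend. As a sketch, assuming socat is available on the host, the
limited socket can be probed with:

  printf 'STATS\nQUIT\n' | socat - UNIX-CONNECT:${GANGLIA_RUNTIME_DIR}/rrdcached.limited.sock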

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/setupGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/setupGanglia.sh b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/setupGanglia.sh
new file mode 100644
index 0000000..704766e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/setupGanglia.sh
@@ -0,0 +1,141 @@
+#!/usr/bin/env bash
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get access to Ganglia-wide constants, utilities etc.
+source ./gangliaLib.sh
+
+function usage()
+{
+  cat << END_USAGE
+Usage: ${0} [-c <gmondClusterName> [-m]] [-t] [-o <owner>] [-g <group>]
+
+Options:
+  -c <gmondClusterName>   The name of the Ganglia Cluster whose gmond configuration we're here to generate.
+
+  -m                      Whether this gmond (if -t is not specified) is the master for its Ganglia 
+                          Cluster. Without this, we generate slave gmond configuration.
+
+  -t                      Whether this is a call to generate gmetad configuration (as opposed to the
+                          gmond configuration that is generated without this).
+  -o <owner>              Owner
+  -g <group>              Group
+END_USAGE
+}
+
+function instantiateGmetadConf()
+{
+  # gmetad utility library.
+  source ./gmetadLib.sh;
+
+  generateGmetadConf > ${GMETAD_CONF_FILE};
+}
+
+function instantiateGmondConf()
+{
+  # gmond utility library.
+  source ./gmondLib.sh;
+ 
+  gmondClusterName=${1};
+
+  if [ "x" != "x${gmondClusterName}" ]
+  then
+
+    createDirectory "${GANGLIA_RUNTIME_DIR}/${gmondClusterName}";
+    createDirectory "${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d";
+    
+    # Always blindly generate the core gmond config - that goes on every box running gmond. 
+    generateGmondCoreConf ${gmondClusterName} > `getGmondCoreConfFileName ${gmondClusterName}`;
+
+    isMasterGmond=${2};
+
+    # Decide whether we want to add on the master or slave gmond config.
+    if [ "0" -eq "${isMasterGmond}" ]
+    then
+      generateGmondSlaveConf ${gmondClusterName} > `getGmondSlaveConfFileName ${gmondClusterName}`;
+    else
+      generateGmondMasterConf ${gmondClusterName} > `getGmondMasterConfFileName ${gmondClusterName}`;
+    fi
+
+    chown -R ${3}:${4} ${GANGLIA_CONF_DIR}/${gmondClusterName}
+
+  else
+    echo "No gmondClusterName passed in, nothing to instantiate";
+  fi
+}
+
+# main()
+
+gmondClusterName=;
+isMasterGmond=0;
+configureGmetad=0;
+owner='root';
+group='root';
+
+while getopts ":c:mto:g:" OPTION
+do
+  case ${OPTION} in
+    c) 
+      gmondClusterName=${OPTARG};
+      ;;
+    m)
+      isMasterGmond=1;
+      ;;
+    t)
+      configureGmetad=1;
+      ;;
+    o)
+      owner=${OPTARG};
+      ;;
+    g)
+      group=${OPTARG};
+      ;;
+    ?)
+      usage;
+      exit 1;
+  esac
+done
+
+# Initialization.
+createDirectory ${GANGLIA_CONF_DIR};
+createDirectory ${GANGLIA_RUNTIME_DIR};
+# So rrdcached can drop its PID files in here.
+chmod -R o+rw ${GANGLIA_RUNTIME_DIR};
+chown ${owner}:${group} ${GANGLIA_CONF_DIR};
+
+if [ -n "${gmondClusterName}" ]
+then
+
+  # Be forgiving of users who pass in -c along with -t (which always takes precedence).
+  if [ "1" -eq "${configureGmetad}" ]
+  then
+    instantiateGmetadConf;
+  else
+    instantiateGmondConf ${gmondClusterName} ${isMasterGmond} ${owner} ${group};
+  fi
+
+elif [ "1" -eq "${configureGmetad}" ]
+then
+  instantiateGmetadConf;
+else
+  usage;
+  exit 2;
+fi
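
Putting the option handling together, typical invocations (cluster name,
owner and group are illustrative) are:

  ./setupGanglia.sh -c HDPSlaves -m -o root -g hadoop   # master gmond config
  ./setupGanglia.sh -c HDPSlaves -o root -g hadoop      # slave gmond config
  ./setupGanglia.sh -t -o root -g hadoop                # gmetad config (-t takes precedence over -c)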

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/startGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/startGmetad.sh b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/startGmetad.sh
new file mode 100644
index 0000000..b271e06
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/startGmetad.sh
@@ -0,0 +1,68 @@
+#!/usr/bin/env bash
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./gmetadLib.sh;
+
+# To get access to ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET}.
+source ./rrdcachedLib.sh;
+
+# Before starting gmetad, start rrdcached.
+./startRrdcached.sh;
+
+if [ $? -eq 0 ] 
+then
+    gmetadRunningPid=`getGmetadRunningPid`;
+
+    # Only attempt to start gmetad if there's not already one running.
+    if [ -f "${GMETAD_PID_FILE}" ] && [ -z "${gmetadRunningPid}" ]
+    then
+      rm -f ${GMETAD_PID_FILE}; rm -f /var/lock/subsys/hdp-gmetad
+    fi
+    if [ -z "${gmetadRunningPid}" ]
+    then
+        env RRDCACHED_ADDRESS=${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
+                    ${GMETAD_BIN} --conf=${GMETAD_CONF_FILE} --pid-file=${GMETAD_PID_FILE};
+
+        for i in `seq 0 5`; do
+          gmetadRunningPid=`getGmetadRunningPid`;
+          if [ -n "${gmetadRunningPid}" ]
+          then
+            break;
+          fi
+          sleep 1;
+        done
+
+        if [ -n "${gmetadRunningPid}" ]
+        then
+            echo "Started ${GMETAD_BIN} with PID ${gmetadRunningPid}";
+        else
+            echo "Failed to start ${GMETAD_BIN}";
+            exit 1;
+        fi
+    else
+        echo "${GMETAD_BIN} already running with PID ${gmetadRunningPid}";
+    fi
+else
+    echo "Not starting ${GMETAD_BIN} because starting ${RRDCACHED_BIN} failed.";
+    exit 2;
+fi
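
The launch-then-poll pattern above (start the daemon, then re-check its
PID up to six times at one-second intervals) recurs in startGmond.sh and
startRrdcached.sh below. A minimal generic sketch of the same idea, for
reference only (not part of this patch):

  waitForPid() {
    # ${1} names a get*RunningPid-style function; prints the PID on success.
    for i in `seq 0 5`; do
      pid=`${1}`;
      if [ -n "${pid}" ]; then echo "${pid}"; return 0; fi
      sleep 1;
    done
    return 1;
  }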

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/startGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/startGmond.sh b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/startGmond.sh
new file mode 100644
index 0000000..cdf4fe0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/startGmond.sh
@@ -0,0 +1,85 @@
+#!/usr/bin/env bash
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
+source ./gmondLib.sh;
+
+function startGmondForCluster()
+{
+    gmondClusterName=${1};
+
+    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+    gmondPidFileName=`getGmondPidFileName ${gmondClusterName}`;
+
+    # Only attempt to start gmond if there's not already one running.
+    if [ -f "${gmondPidFileName}" ] && [ -z "${gmondRunningPid}" ]
+    then
+      rm -f ${gmondPidFileName}
+    fi
+    if [ -z "${gmondRunningPid}" ]
+    then
+      gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
+
+      if [ -e "${gmondCoreConfFileName}" ]
+      then 
+        gmondPidFileName=`getGmondPidFileName ${gmondClusterName}`;
+
+        ${GMOND_BIN} --conf=${gmondCoreConfFileName} --pid-file=${gmondPidFileName};
+
+        for i in `seq 0 5`; do
+          gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+          if [ -n "${gmondRunningPid}" ]
+          then
+            break;
+          fi
+          sleep 1;
+        done
+  
+        if [ -n "${gmondRunningPid}" ]
+        then
+            echo "Started ${GMOND_BIN} for cluster ${gmondClusterName} with PID ${gmondRunningPid}";
+        else
+            echo "Failed to start ${GMOND_BIN} for cluster ${gmondClusterName}";
+            exit 1;
+        fi
+      fi 
+    else
+      echo "${GMOND_BIN} for cluster ${gmondClusterName} already running with PID ${gmondRunningPid}";
+    fi
+}
+
+# main()
+gmondClusterName=${1};
+
+if [ "x" == "x${gmondClusterName}" ]
+then
+    # No ${gmondClusterName} passed in as command-line arg, so start 
+    # all the gmonds we know about.
+    for gmondClusterName in `getConfiguredGangliaClusterNames`
+    do
+        startGmondForCluster ${gmondClusterName};
+    done
+else
+    # Just start the one ${gmondClusterName} that was asked for.
+    startGmondForCluster ${gmondClusterName};
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/startRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/startRrdcached.sh b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/startRrdcached.sh
new file mode 100644
index 0000000..dc47f39
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/startRrdcached.sh
@@ -0,0 +1,79 @@
+#!/usr/bin/env bash
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+# Only attempt to start rrdcached if there's not already one running.
+if [ -z "${rrdcachedRunningPid}" ]
+then
+    su -s /bin/bash - ${GMETAD_USER} -c "${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
+             -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
+             -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
+             -b ${RRDCACHED_BASE_DIR} -B -t ${RRDCACHED_WRITE_THREADS} \
+             -w ${RRDCACHED_TIMEOUT} -f ${RRDCACHED_FLUSH_TIMEOUT} -z ${RRDCACHED_DELAY} -F"
+
+    # Ideally, we'd use ${RRDCACHED_BIN}'s -s ${WEBSERVER_GROUP} option for 
+    # this, but it sometimes fails to take effect due to a lack of permissions,
+    # so we perform the operation explicitly to be certain.
+    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET};
+    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET};
+
+    # Check to make sure rrdcached actually started up.
+    for i in `seq 0 5`; do
+      rrdcachedRunningPid=`getRrdcachedRunningPid`;
+      if [ -n "${rrdcachedRunningPid}" ]
+        then
+          break;
+      fi
+      sleep 1;
+    done
+
+    if [ -n "${rrdcachedRunningPid}" ]
+    then
+        echo "Started ${RRDCACHED_BIN} with PID ${rrdcachedRunningPid}";
+    else
+        echo "Failed to start ${RRDCACHED_BIN}";
+        exit 1;
+    fi
+
+    # Configure Ganglia Web to work with RRDCached.
+    GANGLIA_WEB_CONFIG_FILE=${GANGLIA_WEB_PATH}/conf_default.php
+
+    if [ -f $GANGLIA_WEB_CONFIG_FILE ]
+    then
+      sed -i "s@\$conf\['rrdcached_socket'] =.*@\$conf\['rrdcached_socket'] = \"unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET}\";@" $GANGLIA_WEB_CONFIG_FILE
+      sed -i "s@\$conf\['rrds'] =.*@\$conf\['rrds'] = \"${RRDCACHED_BASE_DIR}\";@" $GANGLIA_WEB_CONFIG_FILE
+      sed -i "s@\$conf\['gmetad_root'] =.*@\$conf\['gmetad_root'] = \"${RRDCACHED_BASE_DIR}\";@" $GANGLIA_WEB_CONFIG_FILE
+    else
+      echo "${GANGLIA_WEB_CONFIG_FILE} can't be found";
+      exit 1;
+    fi
+
+else
+    echo "${RRDCACHED_BIN} already running with PID ${rrdcachedRunningPid}";
+fi
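
The three sed edits above point ganglia-web at the limited rrdcached
socket and at the RRD root. With the defaults documented elsewhere in
this patch (paths illustrative, since GANGLIA_RUNTIME_DIR and
RRDCACHED_BASE_DIR come from gangliaLib.sh and gangliaEnv.sh),
conf_default.php ends up with lines equivalent to:

  $conf['rrdcached_socket'] = "unix:/var/run/ganglia/rrdcached.limited.sock";
  $conf['rrds'] = "/var/lib/ganglia/rrds";
  $conf['gmetad_root'] = "/var/lib/ganglia/rrds";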

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/stopGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/stopGmetad.sh b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/stopGmetad.sh
new file mode 100644
index 0000000..86731d2
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/stopGmetad.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./gmetadLib.sh;
+
+gmetadRunningPid=`getGmetadRunningPid`;
+
+# Only go ahead with the termination if we could find a running PID.
+if [ -n "${gmetadRunningPid}" ]
+then
+    kill -KILL ${gmetadRunningPid};
+    echo "Stopped ${GMETAD_BIN} (with PID ${gmetadRunningPid})";
+fi
+
+# Poll again.
+gmetadRunningPid=`getGmetadRunningPid`;
+
+# Once we've killed gmetad, there should no longer be a running PID.
+if [ -z "${gmetadRunningPid}" ]
+then
+    # It's safe to stop rrdcached now.
+    ./stopRrdcached.sh;
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/stopGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/stopGmond.sh b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/stopGmond.sh
new file mode 100644
index 0000000..8824d2a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/stopGmond.sh
@@ -0,0 +1,54 @@
+#!/usr/bin/env bash
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
+source ./gmondLib.sh;
+
+function stopGmondForCluster()
+{
+    gmondClusterName=${1};
+
+    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+
+    # Only go ahead with the termination if we could find a running PID.
+    if [ -n "${gmondRunningPid}" ]
+    then
+      kill -KILL ${gmondRunningPid};
+      echo "Stopped ${GMOND_BIN} for cluster ${gmondClusterName} (with PID ${gmondRunningPid})";
+    fi
+}
+
+# main()
+gmondClusterName=${1};
+
+if [ "x" == "x${gmondClusterName}" ]
+then
+    # No ${gmondClusterName} passed in as command-line arg, so stop
+    # all the gmonds we know about.
+    for gmondClusterName in `getConfiguredGangliaClusterNames`
+    do
+        stopGmondForCluster ${gmondClusterName};
+    done
+else
+    stopGmondForCluster ${gmondClusterName};
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/stopRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/stopRrdcached.sh b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/stopRrdcached.sh
new file mode 100644
index 0000000..bffc9e5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/stopRrdcached.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+# Only go ahead with the termination if we could find a running PID.
+if [ -n "${rrdcachedRunningPid}" ]
+then
+    kill -TERM ${rrdcachedRunningPid};
+    # ${RRDCACHED_BIN} takes a few seconds to drain its buffers, so wait 
+    # until we're sure it's well and truly dead. 
+    #
+    # Without this, an immediately following startRrdcached.sh won't do
+    # anything, because it still sees this soon-to-die instance alive,
+    # and the net result is that after a few seconds, there's no
+    # ${RRDCACHED_BIN} running on the box anymore.
+    sleep 5;
+    echo "Stopped ${RRDCACHED_BIN} (with PID ${rrdcachedRunningPid})";
+fi 
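
The fixed sleep above is a pragmatic stand-in for confirming that the
daemon has actually exited. A hedged alternative sketch that polls
instead of sleeping blindly (not part of this patch):

  kill -TERM ${rrdcachedRunningPid};
  for i in `seq 0 9`; do
    if [ -z "`getRrdcachedRunningPid`" ]; then break; fi
    sleep 1;
  done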

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/teardownGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/teardownGanglia.sh b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/teardownGanglia.sh
new file mode 100644
index 0000000..8740c27
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/files/teardownGanglia.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get access to Ganglia-wide constants, utilities etc.
+source ./gangliaLib.sh;
+
+# Undo what we did while setting up Ganglia on this box.
+rm -rf ${GANGLIA_CONF_DIR};
+rm -rf ${GANGLIA_RUNTIME_DIR};

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/functions.py b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/functions.py
new file mode 100644
index 0000000..b02e688
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/functions.py
@@ -0,0 +1,31 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from resource_management import *
+
+
+def turn_off_autostart(service):
+  if System.get_instance().os_family == "ubuntu":
+    Execute(format("update-rc.d {service} disable"),
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+    )
+    Execute(format("service {service} stop"), ignore_failures=True)
+    Execute(format("echo 'manual' > /etc/init/{service}.override")) # disbale upstart job
+  else:
+    Execute(format("chkconfig {service} off"),
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+    )
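
For clarity, turn_off_autostart just shells out to the platform's service
tooling; the commands it issues are equivalent to the following (service
name illustrative):

  # Ubuntu
  update-rc.d gmond disable
  service gmond stop                        # failures are ignored
  echo 'manual' > /etc/init/gmond.override  # disable the Upstart job
  # all other OS families
  chkconfig gmond off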

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/ganglia.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/ganglia.py b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/ganglia.py
new file mode 100644
index 0000000..69fde27
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/ganglia.py
@@ -0,0 +1,97 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+import os
+
+
+def groups_and_users():
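+  # Intentionally a no-op here: Ganglia users and groups are created elsewhere (e.g. by the stack hooks).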
+  import params
+
+def config():
+  import params
+
+  shell_cmds_dir = params.ganglia_shell_cmds_dir
+  shell_files = ['checkGmond.sh', 'checkRrdcached.sh', 'gmetadLib.sh',
+                 'gmondLib.sh', 'rrdcachedLib.sh',
+                 'setupGanglia.sh', 'startGmetad.sh', 'startGmond.sh',
+                 'startRrdcached.sh', 'stopGmetad.sh',
+                 'stopGmond.sh', 'stopRrdcached.sh', 'teardownGanglia.sh']
+  Directory(shell_cmds_dir,
+            owner="root",
+            group="root",
+            recursive=True
+  )
+  init_file("gmetad")
+  init_file("gmond")
+  for sh_file in shell_files:
+    shell_file(sh_file)
+  for conf_file in ['gangliaClusters.conf', 'gangliaEnv.sh', 'gangliaLib.sh']:
+    ganglia_TemplateConfig(conf_file)
+
+
+def init_file(name):
+  import params
+
+  File("/etc/init.d/hdp-" + name,
+       content=StaticFile(name + ".init"),
+       mode=0755
+  )
+
+
+def shell_file(name):
+  import params
+
+  File(params.ganglia_shell_cmds_dir + os.sep + name,
+       content=StaticFile(name),
+       mode=0755
+  )
+
+
+def ganglia_TemplateConfig(name, mode=0755, tag=None):
+  import params
+
+  TemplateConfig(format("{params.ganglia_shell_cmds_dir}/{name}"),
+                 owner="root",
+                 group="root",
+                 template_tag=tag,
+                 mode=mode
+  )
+
+
+def generate_daemon(ganglia_service,
+                    name=None,
+                    role=None,
+                    owner=None,
+                    group=None):
+  import params
+
+  cmd = ""
+  if ganglia_service == "gmond":
+    if role == "server":
+      cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -m -o {owner} -g {group}"
+    else:
+      cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -o {owner} -g {group}"
+  elif ganglia_service == "gmetad":
+    cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -t -o {owner} -g {group}"
+  else:
+    raise Fail("Unexpected ganglia service")
+  Execute(format(cmd),
+          path=[params.ganglia_shell_cmds_dir, "/usr/sbin",
+                "/sbin", "/usr/local/bin", "/bin", "/usr/bin"]
+  )

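The setupGanglia.sh flags used above are -c <cluster>, -m (master/server mode), -t (gmetad), plus -o/-g for ownership. A minimal sketch of the command strings generate_daemon() produces, assuming the shell_cmds_dir default from params.py and an illustrative group:

# Illustrative only: mirrors the branch logic in generate_daemon() above.
def setup_ganglia_command(service, name=None, role=None,
                          owner="root", group="hadoop",
                          shell_cmds_dir="/usr/libexec/hdp/ganglia"):
    base = "%s/setupGanglia.sh" % shell_cmds_dir
    if service == "gmond":
        master_flag = " -m" if role == "server" else ""
        return "%s -c %s%s -o %s -g %s" % (base, name, master_flag, owner, group)
    elif service == "gmetad":
        return "%s -t -o %s -g %s" % (base, owner, group)
    raise ValueError("Unexpected ganglia service: %s" % service)

print(setup_ganglia_command("gmond", name="HDPSlaves", role="monitor"))
print(setup_ganglia_command("gmetad"))
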
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/ganglia_monitor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/ganglia_monitor.py b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/ganglia_monitor.py
new file mode 100644
index 0000000..05ceaff
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/ganglia_monitor.py
@@ -0,0 +1,243 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import sys
+import os
+from os import path
+from resource_management import *
+from ganglia import generate_daemon
+import ganglia
+import functions
+import ganglia_monitor_service
+
+
+class GangliaMonitor(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+    self.configure(env)
+
+    functions.turn_off_autostart(params.gmond_service_name)
+    functions.turn_off_autostart("gmetad") # since the package is installed as well
+
+  def start(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    ganglia_monitor_service.monitor("start")
+
+  def stop(self, env, rolling_restart=False):
+    ganglia_monitor_service.monitor("stop")
+
+
+  def status(self, env):
+    import status_params
+    pid_file_name = 'gmond.pid'
+    pid_file_count = 0
+    pid_dir = status_params.pid_dir
+    # Recursively check all existing gmond pid files
+    for cur_dir, subdirs, files in os.walk(pid_dir):
+      for file_name in files:
+        if file_name == pid_file_name:
+          pid_file = os.path.join(cur_dir, file_name)
+          check_process_status(pid_file)
+          pid_file_count += 1
+    if pid_file_count == 0:  # if no pid file is present
+      raise ComponentIsNotRunning()
+
+
+  def configure(self, env):
+    import params
+
+    ganglia.groups_and_users()
+
+    Directory(params.ganglia_conf_dir,
+              owner="root",
+              group=params.user_group,
+              recursive=True
+    )
+
+    ganglia.config()
+
+    self.generate_slave_configs()
+
+    Directory(path.join(params.ganglia_dir, "conf.d"),
+              owner="root",
+              group=params.user_group
+    )
+
+    File(path.join(params.ganglia_dir, "conf.d/modgstatus.conf"),
+         owner="root",
+         group=params.user_group
+    )
+    File(path.join(params.ganglia_dir, "conf.d/multicpu.conf"),
+         owner="root",
+         group=params.user_group
+    )
+    File(path.join(params.ganglia_dir, "gmond.conf"),
+         owner="root",
+         group=params.user_group
+    )
+
+    if params.is_ganglia_server_host:
+      self.generate_master_configs()
+
+      if len(params.gmond_apps) != 0:
+        self.generate_app_configs()
+
+
+  def generate_app_configs(self):
+    import params
+
+    for gmond_app in params.gmond_apps:
+      generate_daemon("gmond",
+                      name = gmond_app[0],
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+      generate_daemon("gmond",
+                      name = gmond_app[0],
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+  def generate_slave_configs(self):
+    import params
+
+    generate_daemon("gmond",
+                    name = "HDPSlaves",
+                    role = "monitor",
+                    owner = "root",
+                    group = params.user_group)
+
+
+  def generate_master_configs(self):
+    import params
+
+    if params.has_namenodes:
+      generate_daemon("gmond",
+                      name = "HDPNameNode",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_jobtracker:
+      generate_daemon("gmond",
+                      name = "HDPJobTracker",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_hbase_masters:
+      generate_daemon("gmond",
+                      name = "HDPHBaseMaster",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_resourcemanager:
+      generate_daemon("gmond",
+                      name = "HDPResourceManager",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_nodemanager:
+      generate_daemon("gmond",
+                      name = "HDPNodeManager",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_historyserver:
+      generate_daemon("gmond",
+                      name = "HDPHistoryServer",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_slaves:
+      generate_daemon("gmond",
+                      name = "HDPDataNode",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_tasktracker:
+      generate_daemon("gmond",
+                      name = "HDPTaskTracker",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_hbase_rs:
+      generate_daemon("gmond",
+                      name = "HDPHBaseRegionServer",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_nimbus_server:
+      generate_daemon("gmond",
+                      name = "HDPNimbus",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_supervisor_server:
+      generate_daemon("gmond",
+                      name = "HDPSupervisor",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_kafka_broker:
+      generate_daemon("gmond",
+                      name = "HDPKafka",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_flume:
+      generate_daemon("gmond",
+                      name = "HDPFlumeServer",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_journalnode:
+      generate_daemon("gmond",
+                      name = "HDPJournalNode",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    generate_daemon("gmond",
+                    name = "HDPSlaves",
+                    role = "server",
+                    owner = "root",
+                    group = params.user_group)
+
+
+if __name__ == "__main__":
+  GangliaMonitor().execute()

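GangliaMonitor.status() has to cope with one gmond.pid per configured cluster, hence the os.walk over the pid directory. A standalone sketch of that search, with a stub liveness check in place of check_process_status (the pid_dir path here is illustrative; the real value comes from status_params):

import os

def find_gmond_pid_files(pid_dir="/var/run/ganglia/hdp"):
    """Return every gmond.pid found anywhere under pid_dir."""
    found = []
    for cur_dir, _subdirs, files in os.walk(pid_dir):
        if "gmond.pid" in files:
            found.append(os.path.join(cur_dir, "gmond.pid"))
    return found

pid_files = find_gmond_pid_files()
if not pid_files:
    print("no gmond.pid found; the real script raises ComponentIsNotRunning")
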
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/ganglia_monitor_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/ganglia_monitor_service.py b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/ganglia_monitor_service.py
new file mode 100644
index 0000000..cf7a4b1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/ganglia_monitor_service.py
@@ -0,0 +1,27 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+
+
+def monitor(action=None):  # action is 'start' or 'stop'
+  Execute(
+    format(
+      "service hdp-gmond {action} >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1"),
+    path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+  )

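Note the [g]mond pattern in the grep: the bracket expression still matches the literal string gmond, but the pattern itself no longer contains that string, so grep does not report its own process in the log. The gmetad service wrapper further below uses the same trick. A sketch of what monitor("start") formats, for illustration:

# Illustrative expansion of the format() call above for action="start".
action = "start"
cmd = ("service hdp-gmond %s >> /tmp/gmond.log  2>&1 ; "
       "/bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1") % action
print(cmd)
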
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/ganglia_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/ganglia_server.py b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/ganglia_server.py
new file mode 100644
index 0000000..3492fff
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/ganglia_server.py
@@ -0,0 +1,123 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import sys
+import os
+from os import path
+from resource_management import *
+from ganglia import generate_daemon
+import ganglia
+import functions
+import ganglia_server_service
+
+
+class GangliaServer(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+    self.configure(env)
+    
+    functions.turn_off_autostart(params.gmond_service_name) # since the package is installed as well
+    functions.turn_off_autostart("gmetad")
+
+  def start(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    ganglia_server_service.server("start")
+
+  def stop(self, env, rolling_restart=False):
+    import params
+
+    env.set_params(params)
+    ganglia_server_service.server("stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/gmetad.pid")
+    # Check the single gmetad pid file
+    check_process_status(pid_file)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    ganglia.groups_and_users()
+    ganglia.config()
+
+    generate_daemon("gmetad",
+                    name = "gmetad",
+                    role = "server",
+                    owner = "root",
+                    group = params.user_group)
+
+    change_permission()
+    server_files()
+    File(path.join(params.ganglia_dir, "gmetad.conf"),
+         owner="root",
+         group=params.user_group
+    )
+
+
+def change_permission():
+  import params
+
+  Directory(os.path.abspath(os.path.join(params.ganglia_runtime_dir, "..")),
+            mode=0755,
+            recursive=True
+  )
+  Directory(params.dwoo_path,
+            mode=0755,
+            recursive=True
+  )
+  Execute(format("chown -R {web_user} {dwoo_path}"))
+
+def server_files():
+  import params
+
+  rrd_py_path = params.rrd_py_path
+  Directory(rrd_py_path,
+            recursive=True
+  )
+  rrd_py_file_path = path.join(rrd_py_path, "rrd.py")
+  TemplateConfig(rrd_py_file_path,
+                 owner="root",
+                 group="root",
+                 mode=0755
+  )
+  rrd_file_owner = params.gmetad_user
+
+  Directory(params.rrdcached_base_dir,
+            owner=rrd_file_owner,
+            group=rrd_file_owner,
+            mode=0755,
+            recursive=True
+  )
+  
+  if System.get_instance().os_family in ["ubuntu", "suse"]:
+    File(params.ganglia_apache_config_file,
+         content = Template("ganglia.conf.j2"),
+         mode = 0644
+    )
+
+
+if __name__ == "__main__":
+  GangliaServer().execute()

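Unlike the monitor, the server writes exactly one gmetad.pid, so status() checks a single file rather than walking the pid directory. A standalone sketch of that check, with a plain /proc probe standing in for check_process_status (paths illustrative):

import os

def gmetad_running(pid_dir="/var/run/ganglia/hdp"):
    pid_file = os.path.join(pid_dir, "gmetad.pid")
    if not os.path.isfile(pid_file):
        return False
    with open(pid_file) as f:
        pid = int(f.read().strip())
    return os.path.exists("/proc/%d" % pid)  # Linux-only liveness probe

print(gmetad_running())
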
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/ganglia_server_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/ganglia_server_service.py b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/ganglia_server_service.py
new file mode 100644
index 0000000..b93e3f8
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/ganglia_server_service.py
@@ -0,0 +1,27 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+
+
+def server(action=None):  # action is 'start' or 'stop'
+  command = "service hdp-gmetad {action} >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1"
+  Execute(format(command),
+          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+  )
+  MonitorWebserver("restart")

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/params.py
new file mode 100644
index 0000000..5fe6335
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/package/scripts/params.py
@@ -0,0 +1,164 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+from resource_management.core.system import System
+import os
+
+config = Script.get_config()
+
+user_group = config['configurations']['cluster-env']["user_group"]
+ganglia_conf_dir = default("/configurations/ganglia-env/ganglia_conf_dir", "/etc/ganglia/hdp")
+ganglia_dir = "/etc/ganglia"
+ganglia_runtime_dir = config['configurations']['ganglia-env']["ganglia_runtime_dir"]
+ganglia_shell_cmds_dir = "/usr/libexec/hdp/ganglia"
+
+gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
+gmond_user = config['configurations']['ganglia-env']["gmond_user"]
+
+gmond_add_clusters_str = default("/configurations/ganglia-env/additional_clusters", None)
+if gmond_add_clusters_str and gmond_add_clusters_str.isspace():
+  gmond_add_clusters_str = None
+
+gmond_app_strs = [] if gmond_add_clusters_str is None else gmond_add_clusters_str.split(',')
+gmond_apps = []
+
+for x in gmond_app_strs:
+  a, b = x.strip().split(':')
+  gmond_apps.append((a.strip(), b.strip()))
+
+if System.get_instance().os_family == "ubuntu":
+  gmond_service_name = "ganglia-monitor"
+  modules_dir = "/usr/lib/ganglia"
+else:
+  gmond_service_name = "gmond"
+  modules_dir = "/usr/lib64/ganglia"
+
+webserver_group = "apache"
+rrdcached_base_dir = config['configurations']['ganglia-env']["rrdcached_base_dir"]
+rrdcached_timeout = default("/configurations/ganglia-env/rrdcached_timeout", 3600)
+rrdcached_flush_timeout = default("/configurations/ganglia-env/rrdcached_flush_timeout", 7200)
+rrdcached_delay = default("/configurations/ganglia-env/rrdcached_delay", 1800)
+rrdcached_write_threads = default("/configurations/ganglia-env/rrdcached_write_threads", 4)
+
+ganglia_server_host = config["clusterHostInfo"]["ganglia_server_host"][0]
+
+hostname = config["hostname"]
+namenode_host = set(default("/clusterHostInfo/namenode_host", []))
+jtnode_host = set(default("/clusterHostInfo/jtnode_host", []))
+rm_host = set(default("/clusterHostInfo/rm_host", []))
+hs_host = set(default("/clusterHostInfo/hs_host", []))
+hbase_master_hosts = set(default("/clusterHostInfo/hbase_master_hosts", []))
+# datanodes are marked as slave_hosts
+slave_hosts = set(default("/clusterHostInfo/slave_hosts", []))
+tt_hosts = set(default("/clusterHostInfo/mapred_tt_hosts", []))
+nm_hosts = set(default("/clusterHostInfo/nm_hosts", []))
+hbase_rs_hosts = set(default("/clusterHostInfo/hbase_rs_hosts", []))
+flume_hosts = set(default("/clusterHostInfo/flume_hosts", []))
+jn_hosts = set(default("/clusterHostInfo/journalnode_hosts", []))
+nimbus_server_hosts = set(default("/clusterHostInfo/nimbus_hosts", []))
+supervisor_server_hosts = set(default("/clusterHostInfo/supervisor_hosts", []))
+kafka_broker_hosts = set(default("/clusterHostInfo/kafka_broker_hosts", []))
+kafka_ganglia_port = default("/configurations/kafka-broker/kafka.ganglia.metrics.port", 8671)
+
+pure_slave = hostname not in (namenode_host | jtnode_host | rm_host | hs_host | \
+                              hbase_master_hosts | slave_hosts | tt_hosts | hbase_rs_hosts | \
+                              flume_hosts | nm_hosts | jn_hosts | nimbus_server_hosts | \
+                              supervisor_server_hosts)
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+is_tasktracker = hostname in tt_hosts
+is_nodemanager = hostname in nm_hosts
+is_hbase_rs = hostname in hbase_rs_hosts
+is_flume = hostname in flume_hosts
+is_ganglia_server_host = (hostname == ganglia_server_host)
+is_jn_host = hostname in jn_hosts
+is_nimbus_host = hostname in nimbus_server_hosts
+is_supervisor_host = hostname in supervisor_server_hosts
+
+has_namenodes = len(namenode_host) > 0
+has_jobtracker = len(jtnode_host) > 0
+has_resourcemanager = len(rm_host) > 0
+has_historyserver = len(hs_host) > 0
+has_hbase_masters = len(hbase_master_hosts) > 0
+has_slaves = len(slave_hosts) > 0
+has_tasktracker = len(tt_hosts) > 0
+has_nodemanager = len(nm_hosts) > 0
+has_hbase_rs = len(hbase_rs_hosts) > 0
+has_flume = len(flume_hosts) > 0
+has_journalnode = len(jn_hosts) > 0
+has_nimbus_server = len(nimbus_server_hosts) > 0
+has_supervisor_server = len(supervisor_server_hosts) > 0
+has_kafka_broker = len(kafka_broker_hosts) > 0
+
+ganglia_cluster_names = {
+  "jn_hosts": [("HDPJournalNode", 8654)],
+  "flume_hosts": [("HDPFlumeServer", 8655)],
+  "hbase_rs_hosts": [("HDPHBaseRegionServer", 8656)],
+  "nm_hosts": [("HDPNodeManager", 8657)],
+  "mapred_tt_hosts": [("HDPTaskTracker", 8658)],
+  "slave_hosts": [("HDPDataNode", 8659)],
+  "namenode_host": [("HDPNameNode", 8661)],
+  "jtnode_host": [("HDPJobTracker", 8662)],
+  "hbase_master_hosts": [("HDPHBaseMaster", 8663)],
+  "rm_host": [("HDPResourceManager", 8664)],
+  "hs_host": [("HDPHistoryServer", 8666)],
+  "nimbus_hosts": [("HDPNimbus", 8649)],
+  "supervisor_hosts": [("HDPSupervisor", 8650)],
+  "kafka_broker_hosts": [("HDPKafka", kafka_ganglia_port)],
+  "ReservedPort1": [("ReservedPort1", 8667)],
+  "ReservedPort2": [("ReservedPort2", 8668)],
+  "ReservedPort3": [("ReservedPort3", 8669)]
+}
+
+ganglia_clusters = [("HDPSlaves", 8660)]
+
+for key in ganglia_cluster_names:
+  property_name = format("/clusterHostInfo/{key}")
+  hosts = set(default(property_name, []))
+  if len(hosts) > 0:
+    for x in ganglia_cluster_names[key]:
+      ganglia_clusters.append(x)
+
+if len(gmond_apps) > 0:
+  for gmond_app in gmond_apps:
+    ganglia_clusters.append(gmond_app)
+
+ganglia_apache_config_file = "/etc/apache2/conf.d/ganglia.conf"
+ganglia_web_path = "/var/www/html/ganglia"
+if System.get_instance().os_family == "suse":
+  rrd_py_path = '/srv/www/cgi-bin'
+  dwoo_path = '/var/lib/ganglia-web/dwoo'
+  web_user = "wwwrun"
+  # for upgrade purposes, as the path to ganglia was changed
+  if not os.path.exists(ganglia_web_path):
+    ganglia_web_path = '/srv/www/htdocs/ganglia'
+
+elif System.get_instance().os_family == "redhat":
+  rrd_py_path = '/var/www/cgi-bin'
+  dwoo_path = '/var/lib/ganglia/dwoo'
+  web_user = "apache"
+elif System.get_instance().os_family == "ubuntu":
+  rrd_py_path = '/usr/lib/cgi-bin'
+  ganglia_web_path = '/usr/share/ganglia-webfrontend'
+  dwoo_path = '/var/lib/ganglia/dwoo'
+  web_user = "www-data"

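The net effect of the loop above: ganglia_clusters always contains HDPSlaves, plus one (name, port) pair for every host group in ganglia_cluster_names that actually has hosts, plus any user-defined gmond apps. A self-contained sketch with stand-in data (the real values come from clusterHostInfo and the table above):

cluster_host_info = {
    "namenode_host": ["c6401.ambari.apache.org"],
    "slave_hosts": ["c6402.ambari.apache.org"],
    "rm_host": [],
}
ganglia_cluster_names = {
    "namenode_host": [("HDPNameNode", 8661)],
    "slave_hosts": [("HDPDataNode", 8659)],
    "rm_host": [("HDPResourceManager", 8664)],
}

ganglia_clusters = [("HDPSlaves", 8660)]
for key, pairs in ganglia_cluster_names.items():
    if cluster_host_info.get(key):      # only groups with at least one host
        ganglia_clusters.extend(pairs)
print(ganglia_clusters)
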

[12/37] ambari git commit: AMBARI-8695: Common Services: Refactor HDP-2.0.6 HDFS, ZOOKEEPER services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metrics.json b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metrics.json
new file mode 100644
index 0000000..088626a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metrics.json
@@ -0,0 +1,7840 @@
+{
+  "NAMENODE": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "metrics/dfs/FSNamesystem/TotalLoad": {
+            "metric": "dfs.FSNamesystem.TotalLoad",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityTotal": {
+            "metric": "dfs.FSNamesystem.CapacityTotal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityUsed": {
+            "metric": "dfs.FSNamesystem.CapacityUsed",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityRemaining": {
+            "metric": "dfs.FSNamesystem.CapacityRemaining",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
+            "metric": "dfs.FSNamesystem.CapacityUsedNonDFS",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/BlockCapacity": {
+            "metric": "dfs.FSNamesystem.BlockCapacity",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/GetListingOps": {
+            "metric": "dfs.namenode.GetListingOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesAppended": {
+            "metric": "dfs.namenode.FilesAppended",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/fsync_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.FsyncAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/load/load_one": {
+            "metric": "load_one",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/renewLease_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RenewLeaseNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getFileInfo_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetFileInfoAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/complete_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.CompleteAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setPermission_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SetPermissionNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
+            "metric": "dfs.FSNamesystem.CapacityTotalGB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setOwner_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SetOwnerNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getBlockLocations_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_run": {
+            "metric": "proc_run",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
+            "metric": "dfs.FSNamesystem.CapacityUsedGB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/AddBlockOps": {
+            "metric": "dfs.namenode.AddBlockOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/swap_total": {
+            "metric": "swap_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesDeleted": {
+            "metric": "dfs.namenode.FilesDeleted",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Syncs_avg_time": {
+            "metric": "dfs.namenode.SyncsAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "jvm.JvmMetrics.ThreadsBlocked",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "rpc.rpc.RpcQueueTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_total": {
+            "metric": "proc_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/blockReport_avg_time": {
+            "metric": "dfs.namenode.BlockReportAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/disk/part_max_used": {
+            "metric": "part_max_used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getFileInfo_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetFileInfoNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getEditLogSize_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_num_ops": {
+            "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReceived_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_idle": {
+            "metric": "cpu_idle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/versionRequest_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.VersionRequestAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_aidle": {
+            "metric": "cpu_aidle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_free": {
+            "metric": "mem_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/versionRequest_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.VersionRequestNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/addBlock_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.AddBlockNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesCreated": {
+            "metric": "dfs.namenode.FilesCreated",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rename_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RenameAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_in": {
+            "metric": "bytes_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setSafeMode_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SetSafeModeNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_out": {
+            "metric": "pkts_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_cached": {
+            "metric": "mem_cached",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/disk_total": {
+            "metric": "disk_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setPermission_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SetPermissionAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesRenamed": {
+            "metric": "dfs.namenode.FilesRenamed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/register_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setReplication_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.setReplication_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/JournalTransactionsBatchedInSync": {
+            "metric": "dfs.namenode.JournalTransactionsBatchedInSync",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_num_ops": {
+            "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/GetBlockLocations": {
+            "metric": "dfs.namenode.GetBlockLocations",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/fsync_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.FsyncNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_wio": {
+            "metric": "cpu_wio",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/create_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.CreateAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
+            "metric": "dfs.FSNamesystem.PendingReplicationBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_speed": {
+            "metric": "cpu_speed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/delete_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.DeleteAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FileInfoOps": {
+            "metric": "dfs.namenode.FileInfoOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/sendHeartbeat_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/DeleteFileOps": {
+            "metric": "dfs.namenode.DeleteFileOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_avg_time": {
+            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReport_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReportNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setSafeMode_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SetSafeModeAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationSuccesses": {
+            "metric": "rpc.rpc.RpcAuthenticationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
+            "metric": "dfs.FSNamesystem.PendingDeletionBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationFailures": {
+            "metric": "rpc.rpc.RpcAuthenticationFailures",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/pkts_in": {
+            "metric": "pkts_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_total": {
+            "metric": "mem_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getEditLogSize_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesInGetListingOps": {
+            "metric": "dfs.namenode.FilesInGetListingOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "jvm.JvmMetrics.ThreadsRunnable",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/complete_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.CompleteNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "jvm.JvmMetrics.ThreadsNew",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollFsImage_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.rollFsImage_num_ops",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationFailures": {
+            "metric": "rpc.rpc.RpcAuthorizationFailures",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Syncs_num_ops": {
+            "metric": "dfs.namenode.SyncsNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReceived_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setReplication_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.setReplication_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollEditLog_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RollEditLogAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "rpc.rpc.SentBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/FilesTotal": {
+            "metric": "dfs.FSNamesystem.FilesTotal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "jvm.JvmMetrics.LogWarn",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/ExcessBlocks": {
+            "metric": "dfs.FSNamesystem.ExcessBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "jvm.JvmMetrics.GcCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "rpc.rpc.ReceivedBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_nice": {
+            "metric": "cpu_nice",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/blockReport_num_ops": {
+            "metric": "dfs.namenode.BlockReportNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/SafemodeTime": {
+            "metric": "dfs.namenode.SafemodeTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollFsImage_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.rollFsImage_avg_time",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/mkdirs_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.MkdirsAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/NumOpenConnections": {
+            "metric": "rpc.rpc.NumOpenConnections",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
+            "metric": "dfs.FSNamesystem.ScheduledReplicationBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/disk/disk_free": {
+            "metric": "disk_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/BlocksTotal": {
+            "metric": "dfs.FSNamesystem.BlocksTotal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_buffers": {
+            "metric": "mem_buffers",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/gcTimeMillis": {
+            "metric": "jvm.JvmMetrics.GcTimeMillis",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getBlockLocations_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Transactions_num_ops": {
+            "metric": "dfs.namenode.TransactionsNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/create_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.CreateNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTerminated": {
+            "metric": "jvm.JvmMetrics.ThreadsTerminated",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/bytes_out": {
+            "metric": "bytes_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_user": {
+            "metric": "cpu_user",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_free": {
+            "metric": "swap_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load/load_five": {
+            "metric": "load_five",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_system": {
+            "metric": "cpu_system",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
+            "metric": "dfs.FSNamesystem.CapacityRemainingGB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Transactions_avg_time": {
+            "metric": "dfs.namenode.TransactionsAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/boottime": {
+            "metric": "boottime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/MissingBlocks": {
+            "metric": "dfs.FSNamesystem.MissingBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/callQueueLen": {
+            "metric": "rpc.rpc.CallQueueLength",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/delete_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.DeleteNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CorruptBlocks": {
+            "metric": "dfs.FSNamesystem.CorruptBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rename_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RenameNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReport_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReportAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/mkdirs_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.MkdirsNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load/load_fifteen": {
+            "metric": "load_fifteen",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logInfo": {
+            "metric": "jvm.JvmMetrics.LogInfo",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/fsImageLoadTime": {
+            "metric": "dfs.namenode.FsImageLoadTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getListing_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetListingNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blocksBeingWrittenReport_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollEditLog_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RollEditLogNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/addBlock_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.AddBlockAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blocksBeingWrittenReport_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setOwner_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SetOwnerAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_num_ops": {
+            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_shared": {
+            "metric": "mem_shared",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
+            "metric": "dfs.FSNamesystem.UnderReplicatedBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/sendHeartbeat_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/CreateFileOps": {
+            "metric": "dfs.namenode.CreateFileOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logError": {
+            "metric": "jvm.JvmMetrics.LogError",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_avg_time": {
+            "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_num": {
+            "metric": "cpu_num",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/register_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationSuccesses": {
+            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getListing_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetListingAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logFatal": {
+            "metric": "jvm.JvmMetrics.LogFatal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/renewLease_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RenewLeaseAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "metrics/dfs/namenode/Used": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/TotalLoad": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.TotalLoad",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memMaxM":{
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemMaxM",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/dfs/FSNamesystem/BlockCapacity": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlockCapacity",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/TotalFiles": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/HostName": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.HostName",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/GetListingOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.GetListingOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/UpgradeFinalized": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/fsync_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=ugi.loginSuccess_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/Safemode": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/CorruptBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CorruptBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/LiveNodes": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/renewLease_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getFileInfo_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityRemaining": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemaining",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/PercentRemaining": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/complete_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityTotalGB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getBlockLocations_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/AddBlockOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.AddBlockOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsedGB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Syncs_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.Syncs_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsBlocked",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/PercentUsed": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/DecomNodes": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/blockReport_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.blockReport_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/NonDfsUsedSpace": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/UpgradeFinalized": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getFileInfo_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getEditLogSize_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginSuccess_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=ugi.loginSuccess_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/blockReceived_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Safemode": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/FilesCreated": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.FilesCreated",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/addBlock_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/DecomNodes": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityUsed": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsed",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/NonHeapMemoryUsed": {
+            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memNonHeapCommittedM": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapCommittedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/DeadNodes": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/PercentUsed": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Free": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Free",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Total": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/GetBlockLocations": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.GetBlockLocations",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginFailure_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=ugi.loginFailure_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/fsync_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/HeapMemoryMax": {
+            "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/create_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.create_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.PendingReplicationBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/UnderReplicatedBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.UnderReplicatedBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/FileInfoOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.FileInfoOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/MissingBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.MissingBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/sendHeartbeat_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcProcessingTime_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/blockReport_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/CapacityRemaining": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemState.CapacityRemaining",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthenticationSuccesses": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationSuccesses",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.PendingDeletionBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthenticationFailures": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationFailures",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getEditLogSize_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapCommittedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/FilesInGetListingOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.FilesInGetListingOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsRunnable",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/BlocksTotal": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlocksTotal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityTotal": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityTotal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/complete_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/LiveNodes": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsNew",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/rollFsImage_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthorizationFailures": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationFailures",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Syncs_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.Syncs_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/StartTime": {
+            "metric": "java.lang:type=Runtime.StartTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/blockReceived_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/rollEditLog_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/DeadNodes": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.SentBytes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/HeapMemoryUsed": {
+            "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/FilesTotal": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.FilesTotal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Version": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogWarn",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/ExcessBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.ExcessBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/PercentRemaining": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.ReceivedBytes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/blockReport_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.blockReport_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/NonHeapMemoryMax": {
+            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/rollFsImage_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/NumOpenConnections": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.NumOpenConnections",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memHeapUsedM": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapUsedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.ScheduledReplicationBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsWaiting": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsWaiting",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/BlocksTotal": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlocksTotal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/gcTimeMillis": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcTimeMillis",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getBlockLocations_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Transactions_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.Transactions_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/create_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.create_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/CapacityTotal": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsTerminated": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTerminated",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemainingGB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Transactions_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.Transactions_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/MissingBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.MissingBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Threads": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Threads",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/callQueueLen": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.callQueueLen",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CorruptBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CorruptBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/blockReport_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/TotalFiles": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logInfo": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogInfo",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/NameDirStatuses": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NameDirStatuses",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getListing_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/rollEditLog_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/addBlock_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcProcessingTime_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/CapacityUsed": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.UnderReplicatedBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/sendHeartbeat_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/CreateFileOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.CreateFileOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logError": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogError",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginFailure_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=ugi.loginFailure_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthorizationSuccesses": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationSuccesses",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/Version": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getListing_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logFatal": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogFatal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/NonDfsUsedSpace": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/renewLease_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/TotalBlocks": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityNonDFSUsed",
+            "pointInTime": true,
+            "temporal": false
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "metrics/dfs/FSNamesystem/TotalLoad": {
+            "metric": "dfs.FSNamesystem.TotalLoad",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityTotal": {
+            "metric": "dfs.FSNamesystem.CapacityTotal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityUsed": {
+            "metric": "dfs.FSNamesystem.CapacityUsed",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityRemaining": {
+            "metric": "dfs.FSNamesystem.CapacityRemaining",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
+            "metric": "dfs.FSNamesystem.CapacityUsedNonDFS",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/BlockCapacity": {
+            "metric": "dfs.FSNamesystem.BlockCapacity",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/GetListingOps": {
+            "metric": "dfs.namenode.GetListingOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesAppended": {
+            "metric": "dfs.namenode.FilesAppended",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/fsync_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.FsyncAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/load/load_one": {
+            "metric": "load_one",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/renewLease_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RenewLeaseNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getFileInfo_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetFileInfoAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/complete_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.CompleteAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setPermission_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SetPermissionNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
+            "metric": "dfs.FSNamesystem.CapacityTotalGB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setOwner_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SetOwnerNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getBlockLocations_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_run": {
+            "metric": "proc_run",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
+            "metric": "dfs.FSNamesystem.CapacityUsedGB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/AddBlockOps": {
+            "metric": "dfs.namenode.AddBlockOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/swap_total": {
+            "metric": "swap_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesDeleted": {
+            "metric": "dfs.namenode.FilesDeleted",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Syncs_avg_time": {
+            "metric": "dfs.namenode.SyncsAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "jvm.JvmMetrics.ThreadsBlocked",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "rpc.rpc.RpcQueueTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_total": {
+            "metric": "proc_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/blockReport_avg_time": {
+            "metric": "dfs.namenode.BlockReportAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/disk/part_max_used": {
+            "metric": "part_max_used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getFileInfo_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetFileInfoNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getEditLogSize_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_num_ops": {
+            "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReceived_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_idle": {
+            "metric": "cpu_idle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/versionRequest_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.VersionRequestAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_aidle": {
+            "metric": "cpu_aidle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_free": {
+            "metric": "mem_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/versionRequest_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.VersionRequestNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/addBlock_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.AddBlockNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesCreated": {
+            "metric": "dfs.namenode.FilesCreated",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rename_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RenameAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_in": {
+            "metric": "bytes_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setSafeMode_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SetSafeModeNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_out": {
+            "metric": "pkts_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_cached": {
+            "metric": "mem_cached",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/disk_total": {
+            "metric": "disk_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setPermission_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SetPermissionAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesRenamed": {
+            "metric": "dfs.namenode.FilesRenamed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/register_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setReplication_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.setReplication_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/JournalTransactionsBatchedInSync": {
+            "metric": "dfs.namenode.JournalTransactionsBatchedInSync",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_num_ops": {
+            "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/GetBlockLocations": {
+            "metric": "dfs.namenode.GetBlockLocations",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/fsync_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.FsyncNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_wio": {
+            "metric": "cpu_wio",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/create_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.CreateAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
+            "metric": "dfs.FSNamesystem.PendingReplicationBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_speed": {
+            "metric": "cpu_speed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/delete_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.DeleteAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FileInfoOps": {
+            "metric": "dfs.namenode.FileInfoOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/sendHeartbeat_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/DeleteFileOps": {
+            "metric": "dfs.namenode.DeleteFileOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_avg_time": {
+            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReport_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReportNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setSafeMode_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SetSafeModeAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationSuccesses": {
+            "metric": "rpc.rpc.RpcAuthenticationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
+            "metric": "dfs.FSNamesystem.PendingDeletionBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationFailures": {
+            "metric": "rpc.rpc.RpcAuthenticationFailures",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/pkts_in": {
+            "metric": "pkts_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_total": {
+            "metric": "mem_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getEditLogSize_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesInGetListingOps": {
+            "metric": "dfs.namenode.FilesInGetListingOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "jvm.JvmMetrics.ThreadsRunnable",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/complete_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.CompleteNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "jvm.JvmMetrics.ThreadsNew",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollFsImage_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.rollFsImage_num_ops",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationFailures": {
+            "metric": "rpc.rpc.RpcAuthorizationFailures",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Syncs_num_ops": {
+            "metric": "dfs.namenode.SyncsNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReceived_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setReplication_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.setReplication_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollEditLog_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RollEditLogAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "rpc.rpc.SentBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/FilesTotal": {
+            "metric": "dfs.FSNamesystem.FilesTotal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "jvm.JvmMetrics.LogWarn",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/ExcessBlocks": {
+            "metric": "dfs.FSNamesystem.ExcessBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "jvm.JvmMetrics.GcCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "rpc.rpc.ReceivedBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_nice": {
+            "metric": "cpu_nice",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/blockReport_num_ops": {
+            "metric": "dfs.namenode.BlockReportNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/SafemodeTime": {
+            "metric": "dfs.namenode.SafemodeTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollFsImage_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.rollFsImage_avg_time",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/mkdirs_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.MkdirsAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/NumOpenConnections": {
+            "metric": "rpc.rpc.NumOpenConnections",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
+            "metric": "dfs.FSNamesystem.ScheduledReplicationBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/disk/disk_free": {
+            "metric": "disk_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/BlocksTotal": {
+            "metric": "dfs.FSNamesystem.BlocksTotal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_buffers": {
+            "metric": "mem_buffers",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/gcTimeMillis": {
+            "metric": "jvm.JvmMetrics.GcTimeMillis",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getBlockLocations_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Transactions_num_ops": {
+            "metric": "dfs.namenode.TransactionsNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/create_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.CreateNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTerminated": {
+            "metric": "jvm.JvmMetrics.ThreadsTerminated",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/bytes_out": {
+            "metric": "bytes_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_user": {
+            "metric": "cpu_user",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_free": {
+            "metric": "swap_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load/load_five": {
+            "metric": "load_five",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_system": {
+            "metric": "cpu_system",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
+            "metric": "dfs.FSNamesystem.CapacityRemainingGB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Transactions_avg_time": {
+            "metric": "dfs.namenode.TransactionsAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/boottime": {
+            "metric": "boottime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/MissingBlocks": {
+            "metric": "dfs.FSNamesystem.MissingBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/callQueueLen": {
+            "metric": "rpc.rpc.CallQueueLength",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/delete_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.DeleteNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CorruptBlocks": {
+            "metric": "dfs.FSNamesystem.CorruptBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rename_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RenameNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReport_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReportAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/mkdirs_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.MkdirsNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load/load_fifteen": {
+            "metric": "load_fifteen",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logInfo": {
+            "metric": "jvm.JvmMetrics.LogInfo",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/fsImageLoadTime": {
+            "metric": "dfs.namenode.FsImageLoadTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getListing_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetListingNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blocksBeingWrittenReport_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollEditLog_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RollEditLogNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/addBlock_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.AddBlockAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blocksBeingWrittenReport_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setOwner_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SetOwnerAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_num_ops": {
+            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_shared": {
+            "metric": "mem_shared",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
+            "metric": "dfs.FSNamesystem.UnderReplicatedBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/sendHeartbeat_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/CreateFileOps": {
+            "metric": "dfs.namenode.CreateFileOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logError": {
+            "metric": "jvm.JvmMetrics.LogError",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_avg_time": {
+            "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_num": {
+            "metric": "cpu_num",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/register_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationSuccesses": {
+            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getListing_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetListingAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logFatal": {
+            "metric": "jvm.JvmMetrics.LogFatal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/renewLease_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RenewLeaseAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "metrics/dfs/namenode/Used": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/TotalLoad": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.TotalLoad",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memMaxM":{
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemMaxM",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/dfs/FSNamesystem/BlockCapacity": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlockCapacity",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/TotalFiles": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/GetListingOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.GetListingOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/HostName": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.tag.Hostname",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/runtime/StartTime": {
+            "metric": "java.lang:type=Runtime.StartTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/UpgradeFinalized": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.VersionRequestNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/fsync_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.FsyncAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=UgiMetrics.LoginSuccessAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/renewLease_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityRemaining": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemaining",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getFileInfo_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.GetFileInfoAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/PercentRemaining": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/complete_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.CompleteAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityTotalGB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getBlockLocations_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.GetBlockLocationsNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsedGB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/AddBlockOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.AddBlockOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Syncs_avg_time": {
+            "metric": "Ha

<TRUNCATED>
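
The fragment above comes from the NameNode's metrics.json definitions: entries under the "ganglia" source carry "temporal": true and back time-series queries, while entries under the "jmx" source carry "pointInTime": true and map an Ambari metric path to a live JMX MBean attribute. Below is a minimal sketch of how such a mapping can be resolved; this is plain illustrative Python, not Ambari code, and the helper name and metrics.json file path are assumptions for the example.

    import json

    def resolve_jmx_metric(definitions, path):
        # Split e.g. "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemaining"
        # at the last dot into (MBean object name, attribute). This simple
        # heuristic covers the entries above except dotted attributes such as
        # "tag.Hostname".
        entry = definitions[path]["metric"]
        object_name, _, attribute = entry.rpartition(".")
        return object_name, attribute

    # Assumes the "jmx" metrics map shown above was saved as metrics.json.
    with open("metrics.json") as fh:
        jmx_defs = json.load(fh)

    bean, attr = resolve_jmx_metric(jmx_defs, "metrics/dfs/FSNamesystem/CapacityRemaining")
    print(bean)  # Hadoop:service=NameNode,name=FSNamesystem
    print(attr)  # CapacityRemaining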

[10/37] ambari git commit: AMBARI-8695: Common Services: Refactor HDP-2.0.6 HDFS, ZOOKEEPER services (Jayush Luniya)

Posted by jl...@apache.org.
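
The new file below is a verbatim log captured from an hdfs balancer run, checked in as a fixture for the balancer emulator used by the HDFS service scripts. The same pattern repeats each iteration: the balancer rebuilds the network topology, reports datanodes whose utilization sits more than the threshold (5.0 percent here) above or below the cluster average, computes how much data must still move, and schedules block moves from over- to under-utilized nodes. The sketch below shows only that classification step; it is illustrative Python, not the HDFS implementation, and the two sample utilizations are taken from the first iteration of the log.

    THRESHOLD = 5.0  # percent, from "Using a threshold of 5.0" in the log

    def classify(utilizations, threshold=THRESHOLD):
        """utilizations maps datanode address -> used-capacity percentage.
        Returns (over_utilized, under_utilized) relative to the cluster
        average +/- threshold, mirroring the balancer's per-iteration report."""
        avg = sum(utilizations.values()) / len(utilizations)
        over = {n: u for n, u in utilizations.items() if u > avg + threshold}
        under = {n: u for n, u in utilizations.items() if u < avg - threshold}
        return over, under

    # Two of the fifteen datanodes from the first iteration of the log:
    sample = {
        "10.253.130.9:50010": 34.887235026238486,
        "10.253.130.5:50010": 21.178140109955496,
    }
    over, under = classify(sample)
    print("over-utilized:", over)    # the source of the scheduled block moves
    print("under-utilized:", under)  # the target that receives them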
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/balancer-emulator/balancer-err.log
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/balancer-emulator/balancer-err.log b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/balancer-emulator/balancer-err.log
new file mode 100644
index 0000000..d7c6704
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/balancer-emulator/balancer-err.log
@@ -0,0 +1,1032 @@
+14/07/28 17:01:48 INFO balancer.Balancer: Using a threshold of 5.0
+14/07/28 17:01:48 INFO balancer.Balancer: namenodes = [hdfs://evhubudsd1aae.budapest.epam.com:8020]
+14/07/28 17:01:48 INFO balancer.Balancer: p         = Balancer.Parameters[BalancingPolicy.Node, threshold=5.0]
+14/07/28 17:01:49 INFO balancer.Balancer: Block token params received from NN: keyUpdateInterval=600 min(s), tokenLifetime=600 min(s)
+14/07/28 17:01:49 INFO block.BlockTokenSecretManager: Setting block keys
+14/07/28 17:01:49 INFO balancer.Balancer: Balancer will update its block keys every 150 minute(s)
+14/07/28 17:01:49 INFO block.BlockTokenSecretManager: Setting block keys
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:01:49 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.887235026238486]]
+14/07/28 17:01:49 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.178140109955496]]
+14/07/28 17:01:49 INFO balancer.Balancer: Need to move 5.74 GB to make the cluster balanced.
+14/07/28 17:01:49 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:01:49 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:01:57 INFO balancer.Balancer: Moving block 1073950748 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:01:58 INFO balancer.Balancer: Moving block 1073939272 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:02:06 INFO balancer.Balancer: Moving block 1073863504 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:02:13 INFO balancer.Balancer: Moving block 1073863516 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:02:31 INFO balancer.Balancer: Moving block 1073743089 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:03:00 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.803451571241915]]
+14/07/28 17:03:00 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.262867215362437]]
+14/07/28 17:03:00 INFO balancer.Balancer: Need to move 5.58 GB to make the cluster balanced.
+14/07/28 17:03:00 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:03:00 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073937443 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073926003 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073916372 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073926002 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073920016 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:03:05 INFO balancer.Balancer: Moving block 1073937461 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:03:11 INFO balancer.Balancer: Moving block 1073743437 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:03:20 INFO balancer.Balancer: Moving block 1073743443 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:03:31 INFO balancer.Balancer: Moving block 1073743449 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:03:34 INFO balancer.Balancer: Moving block 1073743440 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:04:07 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.70875539052811]]
+14/07/28 17:04:07 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.35756339607624]]
+14/07/28 17:04:07 INFO balancer.Balancer: Need to move 5.40 GB to make the cluster balanced.
+14/07/28 17:04:07 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:04:07 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:04:07 INFO balancer.Balancer: Moving block 1073743776 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:04:08 INFO balancer.Balancer: Moving block 1073915941 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:04:08 INFO balancer.Balancer: Moving block 1073930160 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:04:08 INFO balancer.Balancer: Moving block 1073930161 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:04:08 INFO balancer.Balancer: Moving block 1073908316 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:04:09 INFO balancer.Balancer: Moving block 1073930163 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:04:51 INFO balancer.Balancer: Moving block 1073947549 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:05:04 INFO balancer.Balancer: Moving block 1073863141 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:05:06 INFO balancer.Balancer: Moving block 1073863139 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:05:14 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.53815392807349]]
+14/07/28 17:05:14 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.528164858530864]]
+14/07/28 17:05:14 INFO balancer.Balancer: Need to move 5.06 GB to make the cluster balanced.
+14/07/28 17:05:14 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:05:14 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073945158 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073918874 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073918873 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073945162 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073918867 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073945160 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073914540 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073918868 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073931861 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:05:50 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.538117645568114]]
+14/07/28 17:05:50 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.52820114103624]]
+14/07/28 17:05:50 INFO balancer.Balancer: Need to move 5.06 GB to make the cluster balanced.
+14/07/28 17:05:50 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:05:50 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073916888 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073925481 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073920767 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073908143 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073911961 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073929306 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:06:09 INFO balancer.Balancer: Moving block 1073863170 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:06:33 INFO balancer.Balancer: Moving block 1073929250 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:06:35 INFO balancer.Balancer: Moving block 1073863186 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:06:56 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.407811418798076]]
+14/07/28 17:06:56 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.658507367806276]]
+14/07/28 17:06:56 INFO balancer.Balancer: Need to move 4.81 GB to make the cluster balanced.
+14/07/28 17:06:56 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:06:56 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:06:56 INFO balancer.Balancer: Moving block 1073919724 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:06:56 INFO balancer.Balancer: Moving block 1073915864 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073910902 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073949844 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073926217 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073919721 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073926320 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073946575 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073949843 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:07:33 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.4068167244793]]
+14/07/28 17:07:33 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.659502062125057]]
+14/07/28 17:07:33 INFO balancer.Balancer: Need to move 4.80 GB to make the cluster balanced.
+14/07/28 17:07:33 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:07:33 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:07:33 INFO balancer.Balancer: Moving block 1073948620 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:07:33 INFO balancer.Balancer: Moving block 1073917051 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:07:34 INFO balancer.Balancer: Moving block 1073924651 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:07:40 INFO balancer.Balancer: Moving block 1073742834 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:08:55 INFO balancer.Balancer: Moving block 1073894040 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:08:56 INFO balancer.Balancer: Moving block 1073932476 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:08:59 INFO balancer.Balancer: Moving block 1073742598 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:09:00 INFO balancer.Balancer: Moving block 1073893997 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:09:11 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.144332676814294]]
+14/07/28 17:09:11 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.92198610979006]]
+14/07/28 17:09:11 INFO balancer.Balancer: Need to move 4.29 GB to make the cluster balanced.
+14/07/28 17:09:11 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:09:11 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073920127 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073743556 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073743557 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073929950 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073942945 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073920115 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073743559 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073947343 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073920075 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:09:47 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.14396676101451]]
+14/07/28 17:09:47 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.92215625345692]]
+14/07/28 17:09:47 INFO balancer.Balancer: Need to move 4.29 GB to make the cluster balanced.
+14/07/28 17:09:47 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:09:47 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:09:47 INFO balancer.Balancer: Moving block 1073951772 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:47 INFO balancer.Balancer: Moving block 1073951752 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:09:47 INFO balancer.Balancer: Moving block 1073951754 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:09:47 INFO balancer.Balancer: Moving block 1073951766 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:52 INFO balancer.Balancer: Moving block 1073951747 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:09:56 INFO balancer.Balancer: Moving block 1073951765 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:10:53 INFO balancer.Balancer: Moving block 1073951746 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:10:54 INFO balancer.Balancer: Moving block 1073951745 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:10:54 INFO balancer.Balancer: Moving block 1073951744 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:11:24 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.9413931647133]]
+14/07/28 17:11:24 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:11:24 INFO balancer.Balancer: Need to move 3.89 GB to make the cluster balanced.
+14/07/28 17:11:24 INFO balancer.Balancer: Decided to move 5.84 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
+14/07/28 17:11:24 INFO balancer.Balancer: Decided to move 2.64 GB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
+14/07/28 17:11:24 INFO balancer.Balancer: Decided to move 1.31 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
+14/07/28 17:11:24 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073940539 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073940537 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073927798 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073935420 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073927775 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073923954 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073918163 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073949253 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.3:50010 is succeeded.
+14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073931581 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:11:25 INFO balancer.Balancer: Moving block 1073923922 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.3:50010 is succeeded.
+14/07/28 17:11:25 INFO balancer.Balancer: Moving block 1073931532 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:11:25 INFO balancer.Balancer: Moving block 1073949248 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073923928 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073927787 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073949252 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073906578 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073914353 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.6:50010 is succeeded.
+14/07/28 17:11:30 INFO balancer.Balancer: Moving block 1073931557 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:11:30 INFO balancer.Balancer: Moving block 1073910459 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:12:00 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.923538618186065]]
+14/07/28 17:12:00 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:12:00 INFO balancer.Balancer: Need to move 3.86 GB to make the cluster balanced.
+14/07/28 17:12:00 INFO balancer.Balancer: Decided to move 2.61 GB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
+14/07/28 17:12:00 INFO balancer.Balancer: Decided to move 7.18 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:12:00 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073949133 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.7:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073945194 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073927453 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073923118 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073905689 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073914494 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073905688 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073923119 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073914488 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073905681 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073905677 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073927648 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073945235 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073945226 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073910053 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073927664 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.6:50010 is succeeded.
+14/07/28 17:12:29 INFO balancer.Balancer: Moving block 1073905173 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:13:19 INFO balancer.Balancer: Moving block 1073905177 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:19 INFO balancer.Balancer: Moving block 1073905171 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:21 INFO balancer.Balancer: Moving block 1073905175 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:27 INFO balancer.Balancer: Moving block 1073905172 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:13:37 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.60177342833359]]
+14/07/28 17:13:37 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:13:37 INFO balancer.Balancer: Need to move 3.23 GB to make the cluster balanced.
+14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 1.73 GB bytes from 10.253.130.9:50010 to 10.253.129.223:50010
+14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 375.17 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
+14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 1.00 GB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
+14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 3.66 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
+14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 3.03 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
+14/07/28 17:13:37 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073914692 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073927391 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073927383 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073923582 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073905952 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073914693 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073923467 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073918495 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.10:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073923466 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073948829 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073945548 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073948902 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.10:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073945546 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073905987 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073945549 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073918570 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073945542 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073927370 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.3:50010 is succeeded.
+14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073914708 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.8:50010 is succeeded.
+14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073948908 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.1:50010 is succeeded.
+14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073918565 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073923572 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:13:46 INFO balancer.Balancer: Moving block 1073936056 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:49 INFO balancer.Balancer: Moving block 1073936057 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:13:52 INFO balancer.Balancer: Moving block 1073936063 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:14:09 INFO balancer.Balancer: Moving block 1073936045 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:14:09 INFO balancer.Balancer: Moving block 1073936034 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:14:40 INFO balancer.Balancer: Moving block 1073936032 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.10:50010 is succeeded.
+14/07/28 17:14:40 INFO balancer.Balancer: Moving block 1073936033 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:14:41 INFO balancer.Balancer: Moving block 1073936036 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.6:50010 is succeeded.
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:15:13 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.2458785989085]]
+14/07/28 17:15:13 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:15:13 INFO balancer.Balancer: Need to move 2.53 GB to make the cluster balanced.
+14/07/28 17:15:13 INFO balancer.Balancer: Decided to move 5.46 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
+14/07/28 17:15:13 INFO balancer.Balancer: Decided to move 3.66 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
+14/07/28 17:15:13 INFO balancer.Balancer: Decided to move 683.02 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
+14/07/28 17:15:13 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073934407 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073926699 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073907624 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073930612 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.6:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073950332 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073934387 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073930508 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073934414 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073945924 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073922816 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:15:14 INFO balancer.Balancer: Moving block 1073934411 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:14 INFO balancer.Balancer: Moving block 1073926698 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:15:14 INFO balancer.Balancer: Moving block 1073922838 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:15:15 INFO balancer.Balancer: Moving block 1073919113 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:15 INFO balancer.Balancer: Moving block 1073922843 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:15:15 INFO balancer.Balancer: Moving block 1073907649 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:15 INFO balancer.Balancer: Moving block 1073950223 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:15:49 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.23893576243181]]
+14/07/28 17:15:49 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:15:49 INFO balancer.Balancer: Need to move 2.52 GB to make the cluster balanced.
+14/07/28 17:15:49 INFO balancer.Balancer: Decided to move 375.06 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
+14/07/28 17:15:49 INFO balancer.Balancer: Decided to move 3.66 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
+14/07/28 17:15:49 INFO balancer.Balancer: Decided to move 4.44 GB bytes from 10.253.130.9:50010 to 10.253.130.10:50010
+14/07/28 17:15:49 INFO balancer.Balancer: Decided to move 1.33 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
+14/07/28 17:15:49 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073931740 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073927810 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073923141 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073910191 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073905793 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.3:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073940704 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073949348 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936134 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073914594 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073949356 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.8:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936148 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936164 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936158 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073949359 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073918912 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073914616 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936151 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073923999 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:15:50 INFO balancer.Balancer: Moving block 1073940722 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:15:51 INFO balancer.Balancer: Moving block 1073927855 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:51 INFO balancer.Balancer: Moving block 1073906497 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:15:51 INFO balancer.Balancer: Moving block 1073949350 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.129.224:50010 is succeeded.
+14/07/28 17:15:51 INFO balancer.Balancer: Moving block 1073945051 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:16:25 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.236639727566796]]
+14/07/28 17:16:25 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:16:25 INFO balancer.Balancer: Need to move 2.51 GB to make the cluster balanced.
+14/07/28 17:16:25 INFO balancer.Balancer: Decided to move 2.36 GB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
+14/07/28 17:16:25 INFO balancer.Balancer: Decided to move 1.53 GB bytes from 10.253.130.9:50010 to 10.253.129.223:50010
+14/07/28 17:16:25 INFO balancer.Balancer: Decided to move 5.45 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
+14/07/28 17:16:25 INFO balancer.Balancer: Decided to move 463.99 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
+14/07/28 17:16:25 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073942946 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073947339 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073912361 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073926131 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073947341 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073929961 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743570 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073916254 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743604 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743581 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073926130 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073920078 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073916287 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073933727 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073908503 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743586 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743580 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073937539 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073908497 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073942916 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743590 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073947329 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743599 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.6:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743600 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.6:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073895265 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073937542 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073916258 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073916286 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.1:50010 is succeeded.
+14/07/28 17:16:47 INFO balancer.Balancer: Moving block 1073862841 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:17:01 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.1720712908457]]
+14/07/28 17:17:01 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:17:01 INFO balancer.Balancer: Need to move 2.39 GB to make the cluster balanced.
+14/07/28 17:17:01 INFO balancer.Balancer: Decided to move 3.66 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
+14/07/28 17:17:01 INFO balancer.Balancer: Decided to move 5.45 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
+14/07/28 17:17:01 INFO balancer.Balancer: Decided to move 698.32 MB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
+14/07/28 17:17:01 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915689 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073946573 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915690 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915841 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073919491 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915694 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.10:50010 is succeeded.
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915842 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073949829 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.3:50010 is succeeded.
+14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073895888 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.223:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073949830 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073922418 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073931011 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073949848 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073904475 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073946583 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073904561 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073949813 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073915703 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073926226 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:17:37 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.17123487505752]]
+14/07/28 17:17:37 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:17:37 INFO balancer.Balancer: Need to move 2.38 GB to make the cluster balanced.
+14/07/28 17:17:37 INFO balancer.Balancer: Decided to move 2.23 GB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
+14/07/28 17:17:37 INFO balancer.Balancer: Decided to move 373.37 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
+14/07/28 17:17:37 INFO balancer.Balancer: Decided to move 4.43 GB bytes from 10.253.130.9:50010 to 10.253.130.10:50010
+14/07/28 17:17:37 INFO balancer.Balancer: Decided to move 2.76 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
+14/07/28 17:17:37 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951505 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951406 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951465 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951428 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951479 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951294 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951363 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951445 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951368 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951466 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951325 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.129.224:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951296 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951333 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951315 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951502 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951383 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.4:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951489 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951504 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.3:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951313 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951326 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951310 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:17:44 INFO balancer.Balancer: Moving block 1073951520 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.1:50010 is succeeded.
+14/07/28 17:17:44 INFO balancer.Balancer: Moving block 1073864141 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:18:14 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.13074467796647]]
+14/07/28 17:18:14 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:18:14 INFO balancer.Balancer: Need to move 2.31 GB to make the cluster balanced.
+14/07/28 17:18:14 INFO balancer.Balancer: Decided to move 9.08 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:18:14 INFO balancer.Balancer: Decided to move 729.65 MB bytes from 10.253.130.9:50010 to 10.253.129.223:50010
+14/07/28 17:18:14 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935830 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073931492 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073931497 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073913899 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073910416 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073928121 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073931496 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073927763 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935825 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935414 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073928117 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073928114 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935419 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073927766 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935418 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073910423 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
+14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073949598 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:18:50 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.1305062958578]]
+14/07/28 17:18:50 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:18:50 INFO balancer.Balancer: Need to move 2.30 GB to make the cluster balanced.
+14/07/28 17:18:50 INFO balancer.Balancer: Decided to move 895.07 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
+14/07/28 17:18:50 INFO balancer.Balancer: Decided to move 1.53 GB bytes from 10.253.130.9:50010 to 10.253.129.223:50010
+14/07/28 17:18:50 INFO balancer.Balancer: Decided to move 7.38 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:18:50 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073930642 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073950456 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.10:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934505 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073950457 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.8:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934524 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073930646 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073915219 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934502 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073930640 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.5:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073926854 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934510 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.129.225:50010 is succeeded.
+14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934503 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:18:51 INFO balancer.Balancer: Moving block 1073926851 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
+14/07/28 17:18:51 INFO balancer.Balancer: Moving block 1073926857 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.6:50010 is succeeded.
+14/07/28 17:18:51 INFO balancer.Balancer: Moving block 1073930652 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.3:50010 is succeeded.
+14/07/28 17:18:52 INFO balancer.Balancer: Moving block 1073930651 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.6:50010 is succeeded.
+14/07/28 17:19:02 INFO balancer.Balancer: Moving block 1073934496 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:19:03 INFO balancer.Balancer: Moving block 1073934497 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.0:50010 is succeeded.
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
+14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
+14/07/28 17:19:26 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.07965400229293]]
+14/07/28 17:19:26 INFO balancer.Balancer: 0 underutilized: []
+14/07/28 17:19:26 INFO balancer.Balancer: Need to move 2.21 GB to make the cluster balanced.
+14/07/28 17:19:26 INFO balancer.Balancer: Decided to move 333.25 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
+14/07/28 17:19:26 INFO balancer.Balancer: Decided to move 4.43 GB bytes from 10.253.130.9:50010 to 10.253.130.10:50010
+14/07/28 17:19:26 INFO balancer.Balancer: Decided to move 881.78 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
+14/07/28 17:19:26 INFO balancer.Balancer: Decided to move 4.17 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
+14/07/28 17:19:26 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073931910 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.9:50010 is succeeded.
+14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073905704 from 10.253.130.9:50010 to 10.253.130.7:50010 through

<TRUNCATED>
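
The truncated log above repeats one iteration shape over and over: re-scan the
network topology, classify DataNodes as over- or under-utilized against the
cluster average, assign per-target byte quotas ("Decided to move ... from ...
to ..."), then move blocks until the iteration's cap is reached ("Will move
9.79 GB in this iteration"). Below is a self-contained sketch of that loop;
the identifiers, the 128 MB block size, and the 10 GB per-iteration cap are
illustrative reconstructions from the log, not Hadoop's Balancer internals.

    # Schematic reconstruction of one balancer iteration as seen in the log.
    # Everything here is illustrative; it is not Hadoop's implementation.
    from dataclasses import dataclass

    GB = 1024 ** 3
    MAX_BYTES_PER_ITERATION = 10 * GB  # matches the ~9.79 GB "Will move" lines

    @dataclass
    class DataNode:
        addr: str
        used: int
        capacity: int

        def utilization(self):
            return 100.0 * self.used / self.capacity

    def balance_iteration(nodes, threshold=10.0):
        avg = 100.0 * sum(n.used for n in nodes) / sum(n.capacity for n in nodes)
        over = [n for n in nodes if n.utilization() > avg + threshold]
        under = [n for n in nodes if n.utilization() < avg - threshold]
        print("%d over-utilized: %s" % (len(over), [n.addr for n in over]))
        print("%d underutilized: %s" % (len(under), [n.addr for n in under]))
        moved = 0
        block = 128 * 1024 ** 2  # notional 128 MB blocks
        for src in over:
            # Drain the source toward the mean, capped per iteration.
            while src.utilization() > avg and moved < MAX_BYTES_PER_ITERATION:
                src.used -= block  # "Moving block ... is succeeded."
                moved += block
        print("Moved %.2f GB in this iteration" % (moved / GB))
        return moved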

[25/37] ambari git commit: AMBARI-8746: Common Services: Refactor HDP 2.0.6 HIVE, OOZIE services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
index 826b100..9017dd3 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
@@ -27,12 +27,16 @@ from stacks.utils.RMFTestCase import *
 import socket
 
 class TestHiveServer(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "HIVE/0.12.0.2.0/package"
+  STACK_VERSION = "2.0.6"
 
   def test_configure_default(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/hive_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
                        classname = "HiveServer",
                        command = "configure",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
     self.assertNoMoreResources()
@@ -43,10 +47,12 @@ class TestHiveServer(RMFTestCase):
   def test_start_default(self, socket_mock, popen_mock):
     s = socket_mock.return_value
     
-    self.executeScript("2.0.6/services/HIVE/package/scripts/hive_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
                          classname = "HiveServer",
                          command = "start",
-                         config_file="default.json"
+                         config_file="default.json",
+                         hdp_stack_version = self.STACK_VERSION,
+                         target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assert_configure_default()
@@ -71,10 +77,12 @@ class TestHiveServer(RMFTestCase):
 
   @patch("socket.socket")
   def test_stop_default(self, socket_mock):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/hive_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
                        classname = "HiveServer",
                        command = "stop",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assertResourceCalled('Execute', 'sudo kill `cat /var/run/hive/hive-server.pid`',
@@ -89,10 +97,12 @@ class TestHiveServer(RMFTestCase):
 
     
   def test_configure_secured(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/hive_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
                        classname = "HiveServer",
                        command = "configure",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
     self.assertNoMoreResources()
@@ -102,10 +112,12 @@ class TestHiveServer(RMFTestCase):
   def test_start_secured(self, socket_mock, check_fs_root_mock):
     s = socket_mock.return_value
 
-    self.executeScript("2.0.6/services/HIVE/package/scripts/hive_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
                        classname = "HiveServer",
                        command = "start",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assert_configure_secured()
@@ -132,10 +144,12 @@ class TestHiveServer(RMFTestCase):
 
   @patch("socket.socket")
   def test_stop_secured(self, socket_mock):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/hive_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
                        classname = "HiveServer",
                        command = "stop",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assertResourceCalled('Execute', 'sudo kill `cat /var/run/hive/hive-server.pid`',
@@ -516,10 +530,12 @@ class TestHiveServer(RMFTestCase):
     time_mock.side_effect = [0, 1000, 2000, 3000, 4000]
     
     try:
-      self.executeScript("2.0.6/services/HIVE/package/scripts/hive_server.py",
+      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
                            classname = "HiveServer",
                            command = "start",
-                           config_file="default.json"
+                           config_file="default.json",
+                           hdp_stack_version = self.STACK_VERSION,
+                           target = RMFTestCase.TARGET_COMMON_SERVICES
       )
       
       self.fail("Script failure due to socket error was expected")

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py
index d13b2c3..6da0ba5 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py
@@ -25,14 +25,18 @@ import  resource_management.libraries.functions
 @patch.object(resource_management.libraries.functions, "get_unique_id_and_date", new = MagicMock(return_value=''))
 @patch("socket.socket")
 class TestServiceCheck(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "HIVE/0.12.0.2.0/package"
+  STACK_VERSION = "2.0.6"
 
   @patch("sys.exit")
   def test_service_check_default(self, sys_exit_mock, socket_mock):
 
-    self.executeScript("2.0.6/services/HIVE/package/scripts/service_check.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
                         classname="HiveServiceCheck",
                         command="service_check",
-                        config_file="default.json"
+                        config_file="default.json",
+                        hdp_stack_version = self.STACK_VERSION,
+                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/tmp/hcatSmoke.sh',
                         content = StaticFile('hcatSmoke.sh'),
@@ -85,10 +89,12 @@ class TestServiceCheck(RMFTestCase):
   @patch("sys.exit")
   def test_service_check_secured(self, sys_exit_mock, socket_mock):
 
-    self.executeScript("2.0.6/services/HIVE/package/scripts/service_check.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
                         classname="HiveServiceCheck",
                         command="service_check",
-                        config_file="secured.json"
+                        config_file="secured.json",
+                        hdp_stack_version = self.STACK_VERSION,
+                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; ',
                               user = 'ambari-qa',
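
In the secured variant, the first assertion above checks that the script
kinits as the smoke user before touching Hive. A sketch of the script-side
declaration that would produce that recorded resource; it is abbreviated from
the service_check pattern, and the variable names are illustrative:

    # Script-side sketch; the test above asserts this Execute was declared.
    from resource_management import Execute

    smoke_user = 'ambari-qa'
    kinit_cmd = ("/usr/bin/kinit -kt "
                 "/etc/security/keytabs/smokeuser.headless.keytab %s; " % smoke_user)
    Execute(kinit_cmd, user=smoke_user)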

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_mysql_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_mysql_server.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_mysql_server.py
index 735e380..79c777b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_mysql_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_mysql_server.py
@@ -21,21 +21,27 @@ from mock.mock import MagicMock, call, patch
 from stacks.utils.RMFTestCase import *
 
 class TestMySqlServer(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "HIVE/0.12.0.2.0/package"
+  STACK_VERSION = "2.0.6"
 
   def test_configure_default(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/mysql_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/mysql_server.py",
                        classname = "MysqlServer",
                        command = "configure",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
     self.assertNoMoreResources()
 
   def test_start_default(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/mysql_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/mysql_server.py",
                        classname = "MysqlServer",
                        command = "start",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assertResourceCalled('Execute', ('sed',
@@ -52,10 +58,12 @@ class TestMySqlServer(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_stop_default(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/mysql_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/mysql_server.py",
                        classname = "MysqlServer",
                        command = "stop",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', ('service','mysql','stop'),
                               logoutput = True,
@@ -66,19 +74,23 @@ class TestMySqlServer(RMFTestCase):
 
 
   def test_configure_secured(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/mysql_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/mysql_server.py",
                        classname = "MysqlServer",
                        command = "configure",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
     self.assertNoMoreResources()
 
   def test_start_secured(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/mysql_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/mysql_server.py",
                        classname = "MysqlServer",
                        command = "start",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assertResourceCalled('Execute', ('sed',
@@ -95,10 +107,12 @@ class TestMySqlServer(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_stop_secured(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/mysql_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/mysql_server.py",
                        classname = "MysqlServer",
                        command = "stop",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
     self.assertResourceCalled('Execute', ('service','mysql','stop'),
@@ -109,19 +123,23 @@ class TestMySqlServer(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_clean_default(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/mysql_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/mysql_server.py",
                        classname = "MysqlServer",
                        command = "clean",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_clean_default()
     self.assertNoMoreResources()
 
   def test_clean_secured(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/mysql_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/mysql_server.py",
                        classname = "MysqlServer",
                        command = "clean",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_clean_secured()
     self.assertNoMoreResources()
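
The new target argument tells RMFTestCase which resource tree the (now
stack-agnostic) script path is relative to. A hypothetical resolver to
illustrate the idea; the real logic lives in
ambari-server/src/test/python/stacks/utils/RMFTestCase.py and may differ in
detail:

    # Hypothetical illustration of target-based path resolution; the actual
    # RMFTestCase implementation may differ.
    import os

    TARGET_STACKS = "stacks"                    # legacy: path carries the stack version
    TARGET_COMMON_SERVICES = "common-services"  # new: path is service/version relative

    def resolve_script(script_path, target,
                       resources_root="ambari-server/src/main/resources"):
        if target == TARGET_COMMON_SERVICES:
            # e.g. "HIVE/0.12.0.2.0/package/scripts/mysql_server.py"
            return os.path.join(resources_root, "common-services", script_path)
        # e.g. "2.0.6/services/HIVE/package/scripts/mysql_server.py"
        return os.path.join(resources_root, "stacks", "HDP", script_path)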

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
index fa06253..7ebd4b9 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
@@ -23,21 +23,27 @@ from stacks.utils.RMFTestCase import *
 @patch("os.path.isfile", new = MagicMock(return_value=True))
 @patch("glob.glob", new = MagicMock(return_value=["one", "two"]))
 class TestWebHCatServer(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "HIVE/0.12.0.2.0/package"
+  STACK_VERSION = "2.0.6"
 
   def test_configure_default(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/webhcat_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
                        classname = "WebHCatServer",
                        command = "configure",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
     self.assertNoMoreResources()
 
   def test_start_default(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/webhcat_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
                        classname = "WebHCatServer",
                        command = "start",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assert_configure_default()
@@ -48,10 +54,12 @@ class TestWebHCatServer(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_stop_default(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/webhcat_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
                        classname = "WebHCatServer",
                        command = "stop",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assertResourceCalled('Execute', 'env HADOOP_HOME=/usr /usr/lib/hcatalog/sbin/webhcat_server.sh stop',
@@ -63,20 +71,24 @@ class TestWebHCatServer(RMFTestCase):
     self.assertNoMoreResources()
 
     def test_configure_secured(self):
-      self.executeScript("2.0.6/services/HIVE/package/scripts/webhcat_server.py",
+      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
                          classname = "WebHCatServer",
                          command = "configure",
-                         config_file="secured.json"
+                         config_file="secured.json",
+                         hdp_stack_version = self.STACK_VERSION,
+                         target = RMFTestCase.TARGET_COMMON_SERVICES
       )
 
       self.assert_configure_secured()
       self.assertNoMoreResources()
 
   def test_start_secured(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/webhcat_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
                        classname = "WebHCatServer",
                        command = "start",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assert_configure_secured()
@@ -87,10 +99,12 @@ class TestWebHCatServer(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_stop_secured(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/webhcat_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
                        classname = "WebHCatServer",
                        command = "stop",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assertResourceCalled('Execute', 'env HADOOP_HOME=/usr /usr/lib/hcatalog/sbin/webhcat_server.sh stop',
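
One pre-existing quirk is preserved by the hunks above: in the @@ -63,20 hunk,
test_configure_secured is indented one level deeper than its sibling methods,
so it is defined inside test_stop_default and unittest never collects it. The
refactoring only rewrites the executeScript call; de-indented to class level,
the method would read (sketch):

    # De-indented so unittest discovers it as a test method; the diff above
    # preserves the file's original nesting.
    def test_configure_secured(self):
        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/webhcat_server.py",
                           classname = "WebHCatServer",
                           command = "configure",
                           config_file = "secured.json",
                           hdp_stack_version = self.STACK_VERSION,
                           target = RMFTestCase.TARGET_COMMON_SERVICES)
        self.assert_configure_secured()
        self.assertNoMoreResources()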

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py
index c19c312..f02b55b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py
@@ -22,12 +22,16 @@ from stacks.utils.RMFTestCase import *
 import json
 
 class TestOozieClient(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "OOZIE/4.0.0.2.0/package"
+  STACK_VERSION = "2.0.6"
 
   def test_configure_default(self):
-    self.executeScript("2.0.6/services/OOZIE/package/scripts/oozie_client.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_client.py",
                        classname = "OozieClient",
                        command = "configure",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/etc/oozie/conf',
                               owner = 'oozie',
@@ -76,10 +80,12 @@ class TestOozieClient(RMFTestCase):
 
 
   def test_configure_secured(self):
-    self.executeScript("2.0.6/services/OOZIE/package/scripts/oozie_client.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_client.py",
                        classname = "OozieClient",
                        command = "configure",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/etc/oozie/conf',
                               owner = 'oozie',
@@ -134,10 +140,12 @@ class TestOozieClient(RMFTestCase):
 
 
     default_json['hostLevelParams']['stack_version']= '2.2'
-    self.executeScript("2.0.6/services/OOZIE/package/scripts/oozie_client.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_client.py",
                        classname = "OozieClient",
                        command = "configure",
-                       config_dict=default_json
+                       config_dict=default_json,
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/etc/oozie/conf',
                               owner = 'oozie',
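
The final hunk above shows the second way a test can feed configuration to
executeScript: load the JSON fixture yourself, mutate it (here forcing
stack_version to '2.2'), and pass it as config_dict instead of config_file. A
minimal sketch of that pattern inside a test method; the fixture path is an
assumption based on the 2.0.6 test layout, not taken from the diff:

    # Sketch of the config_dict pattern; the fixture path is an assumption.
    import json

    def test_configure_default_hdp22(self):
        with open("stacks/2.0.6/configs/default.json") as f:
            default_json = json.load(f)
        default_json['hostLevelParams']['stack_version'] = '2.2'  # simulate HDP 2.2
        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_client.py",
                           classname = "OozieClient",
                           command = "configure",
                           config_dict = default_json,
                           hdp_stack_version = self.STACK_VERSION,
                           target = RMFTestCase.TARGET_COMMON_SERVICES)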

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
index dd768e2..1fa2677 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
@@ -21,11 +21,16 @@ from mock.mock import MagicMock, call, patch
 from stacks.utils.RMFTestCase import *
 
 class TestOozieServer(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "OOZIE/4.0.0.2.0/package"
+  STACK_VERSION = "2.0.6"
+
   def test_configure_default(self):
-    self.executeScript("2.0.6/services/OOZIE/package/scripts/oozie_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
                        classname = "OozieServer",
                        command = "configure",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
     self.assertNoMoreResources()
@@ -34,10 +39,12 @@ class TestOozieServer(RMFTestCase):
   @patch("os.path.isfile")
   def test_start_default(self, isfile_mock):
     isfile_mock.return_value = True
-    self.executeScript("2.0.6/services/OOZIE/package/scripts/oozie_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
                          classname = "OozieServer",
                          command = "start",
-                         config_file="default.json"
+                         config_file="default.json",
+                         hdp_stack_version = self.STACK_VERSION,
+                         target = RMFTestCase.TARGET_COMMON_SERVICES
         )
     self.assert_configure_default()
     self.assertResourceCalled('Execute', 'cd /var/tmp/oozie && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run',
@@ -58,10 +65,12 @@ class TestOozieServer(RMFTestCase):
 
 
   def test_stop_default(self):
-    self.executeScript("2.0.6/services/OOZIE/package/scripts/oozie_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
                          classname = "OozieServer",
                          command = "stop",
-                         config_file="default.json"
+                         config_file="default.json",
+                         hdp_stack_version = self.STACK_VERSION,
+                         target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', 'cd /var/tmp/oozie && /usr/lib/oozie/bin/oozie-stop.sh',
         only_if = 'ls /var/run/oozie/oozie.pid >/dev/null 2>&1 && ps -p `cat /var/run/oozie/oozie.pid` >/dev/null 2>&1',
@@ -74,10 +83,12 @@ class TestOozieServer(RMFTestCase):
 
 
   def test_configure_secured(self):
-    self.executeScript("2.0.6/services/OOZIE/package/scripts/oozie_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
                        classname = "OozieServer",
                        command = "configure",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
     self.assertNoMoreResources()
@@ -85,10 +96,12 @@ class TestOozieServer(RMFTestCase):
   @patch("os.path.isfile")
   def test_start_secured(self, isfile_mock):
     isfile_mock.return_value = True
-    self.executeScript("2.0.6/services/OOZIE/package/scripts/oozie_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
                          classname = "OozieServer",
                          command = "start",
-                         config_file="secured.json"
+                         config_file="secured.json",
+                         hdp_stack_version = self.STACK_VERSION,
+                         target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
     self.assertResourceCalled('Execute', 'cd /var/tmp/oozie && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run',
@@ -108,10 +121,12 @@ class TestOozieServer(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_stop_secured(self):
-    self.executeScript("2.0.6/services/OOZIE/package/scripts/oozie_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
                          classname = "OozieServer",
                          command = "stop",
-                         config_file="secured.json"
+                         config_file="secured.json",
+                         hdp_stack_version = self.STACK_VERSION,
+                         target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', 'cd /var/tmp/oozie && /usr/lib/oozie/bin/oozie-stop.sh',
         only_if = 'ls /var/run/oozie/oozie.pid >/dev/null 2>&1 && ps -p `cat /var/run/oozie/oozie.pid` >/dev/null 2>&1',
@@ -434,10 +449,12 @@ class TestOozieServer(RMFTestCase):
         default_json = json.load(f)
 
       default_json['hostLevelParams']['stack_version']= '2.2'
-      self.executeScript("2.0.6/services/OOZIE/package/scripts/oozie_server.py",
+      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
                        classname = "OozieServer",
                        command = "configure",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
       )
       self.assert_configure_default()
       self.assertResourceCalled('Directory', '/etc/oozie/conf/action-conf/hive',

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_service_check.py
index 2c8165c..59ab5c6 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_service_check.py
@@ -22,21 +22,28 @@ import resource_management.libraries.functions
 from mock.mock import MagicMock, call, patch
 
 class TestServiceCheck(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "OOZIE/4.0.0.2.0/package"
+  STACK_VERSION = "2.0.6"
+  
   def test_service_check_default(self):
-    self.executeScript("2.0.6/services/OOZIE/package/scripts/service_check.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
                         classname="OozieServiceCheck",
                         command="service_check",
-                        config_file="default.json"
+                        config_file="default.json",
+                        hdp_stack_version = self.STACK_VERSION,
+                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
     self.assert_service_check()
     self.assertNoMoreResources()
     
   def test_service_check_secured(self):
-    self.executeScript("2.0.6/services/OOZIE/package/scripts/service_check.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
                         classname="OozieServiceCheck",
                         command="service_check",
-                        config_file="default.json"
+                        config_file="default.json",
+                        hdp_stack_version = self.STACK_VERSION,
+                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
     self.assert_service_check()
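
Every test case in these diffs repeats the same three-argument change: the common-services script path, hdp_stack_version, and target. A minimal sketch of how a test class could factor that out, assuming only the RMFTestCase.executeScript keyword arguments visible above (the helper name run_common_services_script is hypothetical, not part of the framework):

from stacks.utils.RMFTestCase import *

class TestOozieClientCompact(RMFTestCase):
  COMMON_SERVICES_PACKAGE_DIR = "OOZIE/4.0.0.2.0/package"
  STACK_VERSION = "2.0.6"

  def run_common_services_script(self, script, **kwargs):
    # Pre-bind the arguments shared by every executeScript call in this file.
    return self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/" + script,
                              hdp_stack_version = self.STACK_VERSION,
                              target = RMFTestCase.TARGET_COMMON_SERVICES,
                              **kwargs)

  def test_configure_default(self):
    self.run_common_services_script("oozie_client.py",
                                    classname = "OozieClient",
                                    command = "configure",
                                    config_file = "default.json")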

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py b/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
index 4693bc2..1709dcb 100644
--- a/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
+++ b/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
@@ -22,20 +22,26 @@ from mock.mock import MagicMock, call, patch
 from stacks.utils.RMFTestCase import *
 
 class TestHiveMetastore(RMFTestCase):
-
+  COMMON_SERVICES_PACKAGE_DIR = "HIVE/0.12.0.2.0/package"
+  STACK_VERSION = "2.0.6"
+  
   def test_configure_default(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/hive_metastore.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_metastore.py",
                        classname = "HiveMetastore",
                        command = "configure",
-                       config_file="../../2.1/configs/default.json"
+                       config_file="../../2.1/configs/default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
 
   def test_start_default(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/hive_metastore.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_metastore.py",
                        classname = "HiveMetastore",
                        command = "start",
-                       config_file="../../2.1/configs/default.json"
+                       config_file="../../2.1/configs/default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assert_configure_default()
@@ -55,10 +61,12 @@ class TestHiveMetastore(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_stop_default(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/hive_metastore.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_metastore.py",
                        classname = "HiveMetastore",
                        command = "stop",
-                       config_file="../../2.1/configs/default.json"
+                       config_file="../../2.1/configs/default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assertResourceCalled('Execute', 'sudo kill `cat /var/run/hive/hive.pid`',
@@ -70,19 +78,23 @@ class TestHiveMetastore(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_configure_secured(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/hive_metastore.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_metastore.py",
                        classname = "HiveMetastore",
                        command = "configure",
-                       config_file="../../2.1/configs/secured.json"
+                       config_file="../../2.1/configs/secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
     self.assertNoMoreResources()
 
   def test_start_secured(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/hive_metastore.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_metastore.py",
                        classname = "HiveMetastore",
                        command = "start",
-                       config_file="../../2.1/configs/secured.json"
+                       config_file="../../2.1/configs/secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assert_configure_secured()
@@ -100,10 +112,12 @@ class TestHiveMetastore(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_stop_secured(self):
-    self.executeScript("2.0.6/services/HIVE/package/scripts/hive_metastore.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_metastore.py",
                        classname = "HiveMetastore",
                        command = "stop",
-                       config_file="../../2.1/configs/secured.json"
+                       config_file="../../2.1/configs/secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assertResourceCalled('Execute', 'sudo kill `cat /var/run/hive/hive.pid`',


[37/37] ambari git commit: AMBARI-8747: Common Services: Refactor HDP-2.0.6 PIG, SQOOP services (Jayush Luniya)

Posted by jl...@apache.org.
AMBARI-8747: Common Services: Refactor HDP-2.0.6 PIG, SQOOP services (Jayush Luniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/191232e2
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/191232e2
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/191232e2

Branch: refs/heads/trunk
Commit: 191232e290f0a3e9b0f3ac7e737cd374858d0941
Parents: 46e9bcb
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Wed Dec 17 12:00:52 2014 -0800
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Wed Dec 17 12:00:52 2014 -0800

----------------------------------------------------------------------
 .../PIG/0.12.0.2.0/configuration/pig-env.xml    |  38 +++++++
 .../PIG/0.12.0.2.0/configuration/pig-log4j.xml  |  62 +++++++++++
 .../0.12.0.2.0/configuration/pig-properties.xml |  89 +++++++++++++++
 .../common-services/PIG/0.12.0.2.0/metainfo.xml |  85 +++++++++++++++
 .../PIG/0.12.0.2.0/package/files/pigSmoke.sh    |  18 ++++
 .../PIG/0.12.0.2.0/package/scripts/params.py    |  72 +++++++++++++
 .../PIG/0.12.0.2.0/package/scripts/pig.py       |  60 +++++++++++
 .../0.12.0.2.0/package/scripts/pig_client.py    |  41 +++++++
 .../0.12.0.2.0/package/scripts/service_check.py | 107 +++++++++++++++++++
 .../SQOOP/1.4.4.2.0/configuration/sqoop-env.xml |  54 ++++++++++
 .../SQOOP/1.4.4.2.0/metainfo.xml                |  92 ++++++++++++++++
 .../SQOOP/1.4.4.2.0/package/scripts/__init__.py |  19 ++++
 .../SQOOP/1.4.4.2.0/package/scripts/params.py   |  53 +++++++++
 .../1.4.4.2.0/package/scripts/service_check.py  |  40 +++++++
 .../SQOOP/1.4.4.2.0/package/scripts/sqoop.py    |  51 +++++++++
 .../1.4.4.2.0/package/scripts/sqoop_client.py   |  41 +++++++
 .../services/PIG/configuration/pig-env.xml      |  38 -------
 .../services/PIG/configuration/pig-log4j.xml    |  62 -----------
 .../PIG/configuration/pig-properties.xml        |  89 ---------------
 .../stacks/HDP/2.0.6/services/PIG/metainfo.xml  |  61 +----------
 .../services/PIG/package/files/pigSmoke.sh      |  18 ----
 .../services/PIG/package/scripts/params.py      |  72 -------------
 .../2.0.6/services/PIG/package/scripts/pig.py   |  60 -----------
 .../services/PIG/package/scripts/pig_client.py  |  41 -------
 .../PIG/package/scripts/service_check.py        | 107 -------------------
 .../services/SQOOP/configuration/sqoop-env.xml  |  54 ----------
 .../HDP/2.0.6/services/SQOOP/metainfo.xml       |  68 +-----------
 .../services/SQOOP/package/scripts/__init__.py  |  19 ----
 .../services/SQOOP/package/scripts/params.py    |  53 ---------
 .../SQOOP/package/scripts/service_check.py      |  40 -------
 .../services/SQOOP/package/scripts/sqoop.py     |  51 ---------
 .../SQOOP/package/scripts/sqoop_client.py       |  41 -------
 .../python/stacks/2.0.6/PIG/test_pig_client.py  |  20 ++--
 .../stacks/2.0.6/PIG/test_pig_service_check.py  |  16 ++-
 .../stacks/2.0.6/SQOOP/test_service_check.py    |  14 ++-
 .../python/stacks/2.0.6/SQOOP/test_sqoop.py     |   8 +-
 36 files changed, 965 insertions(+), 889 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-env.xml b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-env.xml
new file mode 100644
index 0000000..aded45f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-env.xml
@@ -0,0 +1,38 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- pig-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for pig-env.sh file</description>
+    <value>
+JAVA_HOME={{java64_home}}
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+if [ -d "/usr/lib/tez" ]; then
+  PIG_OPTS="$PIG_OPTS -Dmapreduce.framework.name=yarn"
+fi
+    </value>
+  </property>
+  
+</configuration>
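
The pig-env.sh value above is a Jinja template that Ambari renders through InlineTemplate with names from params.py. A standalone sketch of that substitution, assuming plain Jinja2 and illustrative values; Jinja only touches the {{...}} markers, so the shell ${HADOOP_HOME:-...} default passes through untouched:

from jinja2 import Template

pig_env = """JAVA_HOME={{java64_home}}
HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}"""

# Illustrative values; the real ones come from params.py.
print(Template(pig_env).render(java64_home="/usr/jdk64/jdk1.7.0_67",
                               hadoop_home="/usr"))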

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-log4j.xml b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-log4j.xml
new file mode 100644
index 0000000..4fe323c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-log4j.xml
@@ -0,0 +1,62 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# ***** Set the org.apache.pig logger level to INFO and its only appender to A.
+log4j.logger.org.apache.pig=info, A
+
+# ***** A is set to be a ConsoleAppender.
+log4j.appender.A=org.apache.log4j.ConsoleAppender
+# ***** A uses PatternLayout.
+log4j.appender.A.layout=org.apache.log4j.PatternLayout
+log4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-properties.xml b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-properties.xml
new file mode 100644
index 0000000..59cc0c4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/configuration/pig-properties.xml
@@ -0,0 +1,89 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Describe all the Pig agent configurations</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
+# see bin/pig -help
+
+# brief logging (no timestamps)
+brief=false
+
+# debug level, INFO is default
+debug=INFO
+
+# verbose print all log messages to screen (default to print only INFO and above to screen)
+verbose=false
+
+# exectype local|mapreduce, mapreduce is default
+exectype=mapreduce
+
+# Enable insertion of information about script into hadoop job conf 
+pig.script.info.enabled=true
+
+# Do not spill temp files smaller than this size (bytes)
+pig.spill.size.threshold=5000000
+
+# EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
+# This should help reduce the number of files being spilled.
+pig.spill.gc.activation.size=40000000
+
+# the following two parameters are to help estimate the reducer number
+pig.exec.reducers.bytes.per.reducer=1000000000
+pig.exec.reducers.max=999
+
+# Temporary location to store the intermediate data.
+pig.temp.dir=/tmp/
+
+# Threshold for merging FRJoin fragment files
+pig.files.concatenation.threshold=100
+pig.optimistic.files.concatenation=false;
+
+pig.disable.counter=false
+
+hcat.bin=/usr/bin/hcat
+
+    </value>
+  </property>
+
+</configuration>
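
The two reducer settings above feed Pig's reducer estimate: roughly one reducer per pig.exec.reducers.bytes.per.reducer bytes of input, capped at pig.exec.reducers.max. A small illustrative sketch of that arithmetic (the input size is made up):

import math

def estimate_reducers(input_bytes,
                      bytes_per_reducer=1000000000,  # pig.exec.reducers.bytes.per.reducer
                      max_reducers=999):             # pig.exec.reducers.max
  # One reducer per chunk of input, never fewer than 1, never more than the cap.
  return max(1, min(max_reducers, int(math.ceil(input_bytes / float(bytes_per_reducer)))))

print(estimate_reducers(5 * 10**9))  # 5 GB of input -> 5 reducers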

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/metainfo.xml
new file mode 100644
index 0000000..27bf492
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/metainfo.xml
@@ -0,0 +1,85 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>PIG</name>
+      <displayName>Pig</displayName>
+      <comment>Scripting platform for analyzing large datasets</comment>
+      <version>0.12.0.2.0</version>
+      <components>
+        <component>
+          <name>PIG</name>
+          <displayName>Pig</displayName>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <commandScript>
+            <script>scripts/pig_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>env</type>
+              <fileName>pig-env.sh</fileName>
+              <dictionaryName>pig-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>log4j.properties</fileName>
+              <dictionaryName>pig-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>pig.properties</fileName>
+              <dictionaryName>pig-properties</dictionaryName>
+            </configFile>                         
+          </configFiles>          
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>pig</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>YARN</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>pig-env</config-type>
+        <config-type>pig-log4j</config-type>
+        <config-type>pig-properties</config-type>
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/files/pigSmoke.sh b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/files/pigSmoke.sh
new file mode 100644
index 0000000..a22456e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/files/pigSmoke.sh
@@ -0,0 +1,18 @@
+/*Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License */
+
+A = load 'passwd' using PigStorage(':');
+B = foreach A generate \$0 as id;
+store B into 'pigsmoke.out';

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params.py
new file mode 100644
index 0000000..57beac1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params.py
@@ -0,0 +1,72 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management import *
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+hdp_stack_version = str(config['hostLevelParams']['stack_version'])
+hdp_stack_version = format_hdp_stack_version(hdp_stack_version)
+
+#hadoop params
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+  hadoop_bin_dir = "/usr/hdp/current/hadoop-client/bin"
+  hadoop_home = '/usr/hdp/current/hadoop-client'
+  pig_bin_dir = '/usr/hdp/current/pig-client/bin'
+else:
+  hadoop_bin_dir = "/usr/bin"
+  hadoop_home = '/usr'
+  pig_bin_dir = ""
+
+hadoop_conf_dir = "/etc/hadoop/conf"
+pig_conf_dir = "/etc/pig/conf"
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+user_group = config['configurations']['cluster-env']['user_group']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+pig_env_sh_template = config['configurations']['pig-env']['content']
+
+# not supporting 32 bit jdk.
+java64_home = config['hostLevelParams']['java_home']
+
+pig_properties = config['configurations']['pig-properties']['content']
+
+log4j_props = config['configurations']['pig-log4j']['content']
+
+import functools
+#create partial functions with common arguments for every HdfsDirectory call
+#to create hdfs directory we need to call params.HdfsDirectory in code
+HdfsDirectory = functools.partial(
+  HdfsDirectory,
+  conf_dir=hadoop_conf_dir,
+  hdfs_user=hdfs_principal_name if security_enabled else hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  bin_dir = hadoop_bin_dir
+)
\ No newline at end of file
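
params.py ends by pre-binding the cluster-wide arguments to HdfsDirectory with functools.partial, so call sites pass only the directory that varies. The same pattern with a stand-in function (HdfsDirectory itself is a resource_management resource, not reproduced here; the values below are illustrative):

import functools

# Stand-in for the resource_management HdfsDirectory resource.
def hdfs_directory(path, conf_dir, hdfs_user, security_enabled, bin_dir):
  print("mkdir %s (conf=%s, user=%s, secure=%s, bin=%s)"
        % (path, conf_dir, hdfs_user, security_enabled, bin_dir))

# Pre-bind everything that is constant for this cluster, as params.py does.
HdfsDirectory = functools.partial(
  hdfs_directory,
  conf_dir="/etc/hadoop/conf",
  hdfs_user="hdfs",
  security_enabled=False,
  bin_dir="/usr/bin",
)

HdfsDirectory("/user/pig")  # callers supply only the varying argument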

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/pig.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/pig.py b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/pig.py
new file mode 100644
index 0000000..036b930
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/pig.py
@@ -0,0 +1,60 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import os
+
+from resource_management import *
+
+def pig():
+  import params
+
+  Directory( params.pig_conf_dir,
+    recursive = True,
+    owner = params.hdfs_user,
+    group = params.user_group
+  )
+
+  File(format("{pig_conf_dir}/pig-env.sh"),
+    owner=params.hdfs_user,
+    mode=0755,
+    content=InlineTemplate(params.pig_env_sh_template)
+  )
+
+  # pig_properties is always set to a default even if it's not in the payload
+  File(format("{params.pig_conf_dir}/pig.properties"),
+              mode=0644,
+              group=params.user_group,
+              owner=params.hdfs_user,
+              content=params.pig_properties
+  )
+
+  if (params.log4j_props != None):
+    File(format("{params.pig_conf_dir}/log4j.properties"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.hdfs_user,
+      content=params.log4j_props
+    )
+  elif (os.path.exists(format("{params.pig_conf_dir}/log4j.properties"))):
+    File(format("{params.pig_conf_dir}/log4j.properties"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.hdfs_user
+    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/pig_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/pig_client.py b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/pig_client.py
new file mode 100644
index 0000000..931dceb
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/pig_client.py
@@ -0,0 +1,41 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+from pig import pig
+
+
+class PigClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    pig()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  PigClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/service_check.py
new file mode 100644
index 0000000..a0e04ab
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/service_check.py
@@ -0,0 +1,107 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions.dynamic_variable_interpretation import copy_tarballs_to_hdfs
+
+class PigServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    input_file = 'passwd'
+    output_file = "pigsmoke.out"
+
+    cleanup_cmd = format("dfs -rmr {output_file} {input_file}")
+    #cleanup put below to handle retries; if retrying there will be a stale file that needs cleanup; exit code is a function of the second command
+    create_file_cmd = format("{cleanup_cmd}; hadoop --config {hadoop_conf_dir} dfs -put /etc/passwd {input_file} ") #TODO: inconsistent that second command needs hadoop
+    test_cmd = format("fs -test -e {output_file}")
+
+    ExecuteHadoop( create_file_cmd,
+      tries     = 3,
+      try_sleep = 5,
+      user      = params.smokeuser,
+      conf_dir = params.hadoop_conf_dir,
+      # for kinit run
+      keytab = params.smoke_user_keytab,
+      security_enabled = params.security_enabled,
+      kinit_path_local = params.kinit_path_local,
+      bin_dir = params.hadoop_bin_dir
+    )
+
+    File( format("{tmp_dir}/pigSmoke.sh"),
+      content = StaticFile("pigSmoke.sh"),
+      mode = 0755
+    )
+
+    # check for Pig-on-M/R
+    Execute( format("pig {tmp_dir}/pigSmoke.sh"),
+      tries     = 3,
+      try_sleep = 5,
+      path      = format('{pig_bin_dir}:/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'),
+      user      = params.smokeuser
+    )
+
+    ExecuteHadoop( test_cmd,
+      user      = params.smokeuser,
+      conf_dir = params.hadoop_conf_dir,
+      bin_dir = params.hadoop_bin_dir
+    )
+
+    if params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0:
+      # cleanup results from previous test
+      ExecuteHadoop( create_file_cmd,
+        tries     = 3,
+        try_sleep = 5,
+        user      = params.smokeuser,
+        conf_dir = params.hadoop_conf_dir,
+        # for kinit run
+        keytab = params.smoke_user_keytab,
+        security_enabled = params.security_enabled,
+        kinit_path_local = params.kinit_path_local,
+        bin_dir = params.hadoop_bin_dir
+      )
+
+      # Check for Pig-on-Tez
+      copy_tarballs_to_hdfs('tez', params.smokeuser, params.hdfs_user, params.user_group)
+
+      if params.security_enabled:
+        kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
+        Execute(kinit_cmd,
+                user=params.smokeuser
+        )
+
+      Execute( format("pig -x tez {tmp_dir}/pigSmoke.sh"),
+        tries     = 3,
+        try_sleep = 5,
+        path      = format('{pig_bin_dir}:/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'),
+        user      = params.smokeuser
+      )
+
+      ExecuteHadoop( test_cmd,
+        user      = params.smokeuser,
+        conf_dir = params.hadoop_conf_dir,
+        bin_dir = params.hadoop_bin_dir
+      )
+
+if __name__ == "__main__":
+  PigServiceCheck().execute()
+
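
The Pig-on-Tez leg above only runs when compare_versions(hdp_stack_version, '2.2') >= 0. A minimal stand-in for that comparison, assuming plain dotted-numeric versions (the real helper lives in resource_management.libraries.functions.version and handles Ambari's formatted stack versions):

def compare_versions(a, b):
  # Segment-by-segment dotted-numeric comparison: returns -1, 0 or 1.
  pa = [int(x) for x in a.split(".")]
  pb = [int(x) for x in b.split(".")]
  length = max(len(pa), len(pb))
  pa += [0] * (length - len(pa))
  pb += [0] * (length - len(pb))
  return (pa > pb) - (pa < pb)

assert compare_versions("2.0.6", "2.2") < 0     # this stack: Tez leg is skipped
assert compare_versions("2.2.0.0", "2.2") >= 0  # HDP 2.2+: Tez leg runs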

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/configuration/sqoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/configuration/sqoop-env.xml b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/configuration/sqoop-env.xml
new file mode 100644
index 0000000..9a740be
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/configuration/sqoop-env.xml
@@ -0,0 +1,54 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- sqoop-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for sqoop-env.sh file</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+#Set path to where bin/hadoop is available
+export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+#set the path to where bin/hbase is available
+export HBASE_HOME=${HBASE_HOME:-{{hbase_home}}}
+
+#Set the path to where bin/hive is available
+export HIVE_HOME=${HIVE_HOME:-{{hive_home}}}
+
+#Set the path to where the zookeeper config dir is
+export ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}
+
+# add libthrift in hive to sqoop class path first so hive imports work
+export SQOOP_USER_CLASSPATH="`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}"
+    </value>
+  </property>
+  <property>
+    <name>sqoop_user</name>
+    <description>User to run Sqoop as</description>
+    <property-type>USER</property-type>
+    <value>sqoop</value>
+  </property>
+</configuration>
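
The last export above shells out to ls to prepend Hive's libthrift jar, if any, to SQOOP_USER_CLASSPATH so Hive imports work. The equivalent lookup in Python, as a sketch (hive_home mirrors the pre-HDP-2.2 default from params.py):

import glob, os

hive_home = "/usr/lib/hive"  # pre-HDP-2.2 default from params.py
jars = glob.glob(os.path.join(hive_home, "lib", "libthrift-*.jar"))
# Prepend the jars (if any) to whatever classpath is already set.
classpath = ":".join(jars + [os.environ.get("SQOOP_USER_CLASSPATH", "")])
print(classpath)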

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/metainfo.xml
new file mode 100644
index 0000000..1f4a90b
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/metainfo.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SQOOP</name>
+      <displayName>Sqoop</displayName>
+      <comment>Tool for transferring bulk data between Apache Hadoop and
+        structured data stores such as relational databases
+      </comment>
+      <version>1.4.4.2.0</version>
+
+      <components>
+        <component>
+          <name>SQOOP</name>
+          <displayName>Sqoop</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/sqoop_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>env</type>
+              <fileName>sqoop-env.sh</fileName>
+              <dictionaryName>sqoop-env</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>sqoop</name>
+            </package>
+            <package>
+              <name>mysql-connector-java</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+      
+      <requiredServices>
+        <service>HDFS</service>
+      </requiredServices>
+      
+      <configuration-dependencies>
+        <config-type>sqoop-env</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/__init__.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/__init__.py
new file mode 100644
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params.py
new file mode 100644
index 0000000..e8a3ac7
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params.py
@@ -0,0 +1,53 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management import *
+
+config = Script.get_config()
+
+hdp_stack_version = str(config['hostLevelParams']['stack_version'])
+hdp_stack_version = format_hdp_stack_version(hdp_stack_version)
+
+#hadoop params
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+  sqoop_conf_dir = '/etc/sqoop/conf'
+  sqoop_lib = '/usr/hdp/current/sqoop-client/lib'
+  hadoop_home = '/usr/hdp/current/hadoop-client'
+  hbase_home = '/usr/hdp/current/hbase-client'
+  hive_home = '/usr/hdp/current/hive-client'
+  sqoop_bin_dir = '/usr/hdp/current/sqoop-client/bin/'
+else:
+  sqoop_conf_dir = "/usr/lib/sqoop/conf"
+  sqoop_lib = "/usr/lib/sqoop/lib"
+  hadoop_home = '/usr/lib/hadoop'
+  hbase_home = "/usr/lib/hbase"
+  hive_home = "/usr/lib/hive"
+  sqoop_bin_dir = "/usr/bin"
+
+zoo_conf_dir = "/etc/zookeeper"
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+user_group = config['configurations']['cluster-env']['user_group']
+sqoop_env_sh_template = config['configurations']['sqoop-env']['content']
+
+sqoop_user = config['configurations']['sqoop-env']['sqoop_user']
+
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/service_check.py
new file mode 100644
index 0000000..bcd236c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/service_check.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+
+from resource_management import *
+
+
+class SqoopServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    if params.security_enabled:
+      Execute(format("{kinit_path_local}  -kt {smoke_user_keytab} {smokeuser}"),
+              user = params.smokeuser,
+      )
+    Execute("sqoop version",
+            user = params.smokeuser,
+            path = params.sqoop_bin_dir,
+            logoutput = True
+    )
+
+if __name__ == "__main__":
+  SqoopServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/sqoop.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/sqoop.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/sqoop.py
new file mode 100644
index 0000000..99ad575
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/sqoop.py
@@ -0,0 +1,51 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import os
+
+def sqoop(type=None):
+  import params
+  Link(params.sqoop_lib + "/mysql-connector-java.jar",
+       to = '/usr/share/java/mysql-connector-java.jar'
+  ) 
+  Directory(params.sqoop_conf_dir,
+            owner = params.sqoop_user,
+            group = params.user_group,
+            recursive = True
+  )
+  File(format("{sqoop_conf_dir}/sqoop-env.sh"),
+    owner=params.sqoop_user,
+    group = params.user_group,
+    content=InlineTemplate(params.sqoop_env_sh_template)
+  )
+  update_config_permissions(["sqoop-env-template.sh",
+                             "sqoop-site-template.xml",
+                             "sqoop-site.xml"])
+  pass
+
+def update_config_permissions(names):
+  import params
+  for filename in names:
+    full_filename = os.path.join(params.sqoop_conf_dir, filename)
+    File(full_filename,
+          owner = params.sqoop_user,
+          group = params.user_group,
+          only_if = format("test -e {full_filename}")
+    )
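
update_config_permissions above leans on each File resource's only_if shell guard, so ownership is corrected only for config files that actually exist on the host. A plain-Python sketch of the same guard (the chown is printed rather than executed):

import os

def update_config_permissions_sketch(conf_dir, names, owner, group):
  for filename in names:
    full_filename = os.path.join(conf_dir, filename)
    # Same effect as only_if = format("test -e {full_filename}"): skip missing files.
    if os.path.exists(full_filename):
      print("chown %s:%s %s" % (owner, group, full_filename))

update_config_permissions_sketch("/etc/sqoop/conf",
                                 ["sqoop-env-template.sh", "sqoop-site-template.xml", "sqoop-site.xml"],
                                 "sqoop", "hadoop")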

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/sqoop_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/sqoop_client.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/sqoop_client.py
new file mode 100644
index 0000000..6829557
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/sqoop_client.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from sqoop import sqoop
+
+
+class SqoopClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    sqoop(type='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  SqoopClient().execute()
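
Both client scripts in this refactor share one lifecycle contract: install()
chains into configure(), and status() raises ClientComponentHasNoStatus because
a client has no daemon for the agent to poll. A skeleton for any future client
script in this layout (FooClient is a hypothetical name):

from resource_management import *

class FooClient(Script):
  def install(self, env):
    self.install_packages(env)  # package names come from metainfo.xml osSpecifics
    self.configure(env)         # write configuration right after install

  def configure(self, env):
    import params
    env.set_params(params)

  def status(self, env):
    raise ClientComponentHasNoStatus()  # tells the agent not to poll status

if __name__ == "__main__":
  FooClient().execute()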

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/configuration/pig-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/configuration/pig-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/configuration/pig-env.xml
deleted file mode 100644
index aded45f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/configuration/pig-env.xml
+++ /dev/null
@@ -1,38 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <!-- pig-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the jinja template for pig-env.sh file</description>
-    <value>
-JAVA_HOME={{java64_home}}
-HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-
-if [ -d "/usr/lib/tez" ]; then
-  PIG_OPTS="$PIG_OPTS -Dmapreduce.framework.name=yarn"
-fi
-    </value>
-  </property>
-  
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/configuration/pig-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/configuration/pig-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/configuration/pig-log4j.xml
deleted file mode 100644
index 4fe323c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/configuration/pig-log4j.xml
+++ /dev/null
@@ -1,62 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>content</name>
-    <description>Custom log4j.properties</description>
-    <value>
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# ***** Set root logger level to DEBUG and its only appender to A.
-log4j.logger.org.apache.pig=info, A
-
-# ***** A is set to be a ConsoleAppender.
-log4j.appender.A=org.apache.log4j.ConsoleAppender
-# ***** A uses PatternLayout.
-log4j.appender.A.layout=org.apache.log4j.PatternLayout
-log4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n
-    </value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/configuration/pig-properties.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/configuration/pig-properties.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/configuration/pig-properties.xml
deleted file mode 100644
index 59cc0c4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/configuration/pig-properties.xml
+++ /dev/null
@@ -1,89 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>content</name>
-    <description>Describe all the Pig agent configurations</description>
-    <value>
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
-# see bin/pig -help
-
-# brief logging (no timestamps)
-brief=false
-
-# debug level, INFO is default
-debug=INFO
-
-# verbose print all log messages to screen (default to print only INFO and above to screen)
-verbose=false
-
-# exectype local|mapreduce, mapreduce is default
-exectype=mapreduce
-
-# Enable insertion of information about script into hadoop job conf 
-pig.script.info.enabled=true
-
-# Do not spill temp files smaller than this size (bytes)
-pig.spill.size.threshold=5000000
-
-# EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-# This should help reduce the number of files being spilled.
-pig.spill.gc.activation.size=40000000
-
-# the following two parameters are to help estimate the reducer number
-pig.exec.reducers.bytes.per.reducer=1000000000
-pig.exec.reducers.max=999
-
-# Temporary location to store the intermediate data.
-pig.temp.dir=/tmp/
-
-# Threshold for merging FRJoin fragment files
-pig.files.concatenation.threshold=100
-pig.optimistic.files.concatenation=false;
-
-pig.disable.counter=false
-
-hcat.bin=/usr/bin/hcat
-
-    </value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/metainfo.xml
index 27bf492..c0e5aa9 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/metainfo.xml
@@ -20,66 +20,7 @@
   <services>
     <service>
       <name>PIG</name>
-      <displayName>Pig</displayName>
-      <comment>Scripting platform for analyzing large datasets</comment>
-      <version>0.12.0.2.0</version>
-      <components>
-        <component>
-          <name>PIG</name>
-          <displayName>Pig</displayName>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/pig_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>env</type>
-              <fileName>pig-env.sh</fileName>
-              <dictionaryName>pig-env</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>log4j.properties</fileName>
-              <dictionaryName>pig-log4j</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>pig.properties</fileName>
-              <dictionaryName>pig-properties</dictionaryName>
-            </configFile>                         
-          </configFiles>          
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>pig</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <requiredServices>
-        <service>YARN</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>pig-env</config-type>
-        <config-type>pig-log4j</config-type>
-        <config-type>pig-properties</config-type>
-      </configuration-dependencies>
-
+      <extends>common-services/PIG/0.12.0.2.0</extends>
     </service>
   </services>
 </metainfo>
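
This hunk is the point of the whole commit: the stack-level definition shrinks
to a single <extends> pointer and inherits components, packages, the service
check and configuration dependencies from common-services/PIG/0.12.0.2.0.
Conceptually the server overlays the child definition on the parent; the sketch
below is a hypothetical illustration of that merge, not Ambari's actual
metainfo resolver:

def resolve_service(child, parent):
    # Start from the common-services definition and let the stack override
    # only the fields it explicitly declares.
    merged = dict(parent)
    merged.update((k, v) for k, v in child.items() if v is not None)
    return merged

parent = {"name": "PIG", "version": "0.12.0.2.0", "requiredServices": ["YARN"]}
child  = {"name": "PIG", "version": None, "requiredServices": None}
assert resolve_service(child, parent)["version"] == "0.12.0.2.0"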

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/files/pigSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/files/pigSmoke.sh
deleted file mode 100644
index a22456e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/files/pigSmoke.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-/*Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License */
-
-A = load 'passwd' using PigStorage(':');
-B = foreach A generate \$0 as id;
-store B into 'pigsmoke.out';
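
The deleted smoke script (its common-services copy is unchanged) is three lines
of Pig Latin: load the uploaded copy of /etc/passwd with ':' as the field
delimiter, project the first positional field ($0) as id, and store the result.
The same computation in plain Python, minus the MapReduce/Tez execution path:

with open("/etc/passwd") as src, open("pigsmoke.out", "w") as dst:
    for line in src:
        dst.write(line.split(":")[0] + "\n")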

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/params.py
deleted file mode 100644
index 57beac1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/params.py
+++ /dev/null
@@ -1,72 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
-from resource_management import *
-
-# server configurations
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-hdp_stack_version = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(hdp_stack_version)
-
-#hadoop params
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
-  hadoop_bin_dir = "/usr/hdp/current/hadoop-client/bin"
-  hadoop_home = '/usr/hdp/current/hadoop-client'
-  pig_bin_dir = '/usr/hdp/current/pig-client/bin'
-else:
-  hadoop_bin_dir = "/usr/bin"
-  hadoop_home = '/usr'
-  pig_bin_dir = ""
-
-hadoop_conf_dir = "/etc/hadoop/conf"
-pig_conf_dir = "/etc/pig/conf"
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-user_group = config['configurations']['cluster-env']['user_group']
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-pig_env_sh_template = config['configurations']['pig-env']['content']
-
-# not supporting 32 bit jdk.
-java64_home = config['hostLevelParams']['java_home']
-
-pig_properties = config['configurations']['pig-properties']['content']
-
-log4j_props = config['configurations']['pig-log4j']['content']
-
-import functools
-#create partial functions with common arguments for every HdfsDirectory call
-#to create hdfs directory we need to call params.HdfsDirectory in code
-HdfsDirectory = functools.partial(
-  HdfsDirectory,
-  conf_dir=hadoop_conf_dir,
-  hdfs_user=hdfs_principal_name if security_enabled else hdfs_user,
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  bin_dir = hadoop_bin_dir
-)
\ No newline at end of file
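
Two patterns in this params.py recur throughout the refactor. First, paths are
gated on the stack version: 2.2 and later use the /usr/hdp/current/*-client
layout, older stacks the flat /usr/lib and /usr/bin locations. Second,
functools.partial pins the boilerplate keyword arguments of HdfsDirectory once,
so call sites pass only what varies. The partial() trick in isolation:

import functools

def hdfs_directory(path, conf_dir, hdfs_user, mode=0o755):
    print("mkdir %s as %s (conf=%s, mode=%o)" % (path, hdfs_user, conf_dir, mode))

# Bind the arguments that never change per call site.
HdfsDirectory = functools.partial(hdfs_directory,
                                  conf_dir="/etc/hadoop/conf",
                                  hdfs_user="hdfs")

HdfsDirectory("/user/ambari-qa")        # only the varying argument remains
HdfsDirectory("/apps/pig", mode=0o775)  # pinned kwargs can still be overridden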

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/pig.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/pig.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/pig.py
deleted file mode 100644
index 036b930..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/pig.py
+++ /dev/null
@@ -1,60 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import os
-
-from resource_management import *
-
-def pig():
-  import params
-
-  Directory( params.pig_conf_dir,
-    recursive = True,
-    owner = params.hdfs_user,
-    group = params.user_group
-  )
-
-  File(format("{pig_conf_dir}/pig-env.sh"),
-    owner=params.hdfs_user,
-    mode=0755,
-    content=InlineTemplate(params.pig_env_sh_template)
-  )
-
-  # pig_properties is always set to a default even if it's not in the payload
-  File(format("{params.pig_conf_dir}/pig.properties"),
-              mode=0644,
-              group=params.user_group,
-              owner=params.hdfs_user,
-              content=params.pig_properties
-  )
-
-  if (params.log4j_props != None):
-    File(format("{params.pig_conf_dir}/log4j.properties"),
-      mode=0644,
-      group=params.user_group,
-      owner=params.hdfs_user,
-      content=params.log4j_props
-    )
-  elif (os.path.exists(format("{params.pig_conf_dir}/log4j.properties"))):
-    File(format("{params.pig_conf_dir}/log4j.properties"),
-      mode=0644,
-      group=params.user_group,
-      owner=params.hdfs_user
-    )
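
The log4j branch at the bottom encodes a deliberate fallback: when the payload
carries pig-log4j content, write it; when it does not but a log4j.properties
already exists on disk, only re-assert mode and ownership and leave the local
content alone. Stripped of the File resource, the logic is:

import os

def manage_log4j(conf_dir, log4j_props):
    path = os.path.join(conf_dir, "log4j.properties")
    if log4j_props is not None:
        with open(path, "w") as f:  # managed content wins when provided
            f.write(log4j_props)
        os.chmod(path, 0o644)
    elif os.path.exists(path):
        os.chmod(path, 0o644)       # normalize permissions, keep local content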

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/pig_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/pig_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/pig_client.py
deleted file mode 100644
index 931dceb..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/pig_client.py
+++ /dev/null
@@ -1,41 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-from pig import pig
-
-
-class PigClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    pig()
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  PigClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/service_check.py
deleted file mode 100644
index a0e04ab..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/service_check.py
+++ /dev/null
@@ -1,107 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-from resource_management.libraries.functions.dynamic_variable_interpretation import copy_tarballs_to_hdfs
-
-class PigServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    input_file = 'passwd'
-    output_file = "pigsmoke.out"
-
-    cleanup_cmd = format("dfs -rmr {output_file} {input_file}")
-    #cleanup put below to handle retries; if retrying there will be a stale file that needs cleanup; exit code is a function of the second command
-    create_file_cmd = format("{cleanup_cmd}; hadoop --config {hadoop_conf_dir} dfs -put /etc/passwd {input_file} ") #TODO: inconsistent that second command needs hadoop
-    test_cmd = format("fs -test -e {output_file}")
-
-    ExecuteHadoop( create_file_cmd,
-      tries     = 3,
-      try_sleep = 5,
-      user      = params.smokeuser,
-      conf_dir = params.hadoop_conf_dir,
-      # for kinit run
-      keytab = params.smoke_user_keytab,
-      security_enabled = params.security_enabled,
-      kinit_path_local = params.kinit_path_local,
-      bin_dir = params.hadoop_bin_dir
-    )
-
-    File( format("{tmp_dir}/pigSmoke.sh"),
-      content = StaticFile("pigSmoke.sh"),
-      mode = 0755
-    )
-
-    # check for Pig-on-M/R
-    Execute( format("pig {tmp_dir}/pigSmoke.sh"),
-      tries     = 3,
-      try_sleep = 5,
-      path      = format('{pig_bin_dir}:/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'),
-      user      = params.smokeuser
-    )
-
-    ExecuteHadoop( test_cmd,
-      user      = params.smokeuser,
-      conf_dir = params.hadoop_conf_dir,
-      bin_dir = params.hadoop_bin_dir
-    )
-
-    if params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0:
-      # cleanup results from previous test
-      ExecuteHadoop( create_file_cmd,
-        tries     = 3,
-        try_sleep = 5,
-        user      = params.smokeuser,
-        conf_dir = params.hadoop_conf_dir,
-        # for kinit run
-        keytab = params.smoke_user_keytab,
-        security_enabled = params.security_enabled,
-        kinit_path_local = params.kinit_path_local,
-        bin_dir = params.hadoop_bin_dir
-      )
-
-      # Check for Pig-on-Tez
-      copy_tarballs_to_hdfs('tez', params.smokeuser, params.hdfs_user, params.user_group)
-
-      if params.security_enabled:
-        kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
-        Execute(kinit_cmd,
-                user=params.smokeuser
-        )
-
-      Execute( format("pig -x tez {tmp_dir}/pigSmoke.sh"),
-        tries     = 3,
-        try_sleep = 5,
-        path      = format('{pig_bin_dir}:/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'),
-        user      = params.smokeuser
-      )
-
-      ExecuteHadoop( test_cmd,
-        user      = params.smokeuser,
-        conf_dir = params.hadoop_conf_dir,
-        bin_dir = params.hadoop_bin_dir
-      )
-
-if __name__ == "__main__":
-  PigServiceCheck().execute()
-
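
On 2.2+ stacks the check runs the smoke script twice, once on MapReduce and
again with -x tez after copying the Tez tarball to HDFS; the dfs -rmr cleanup
before each run is what makes the retries safe. The branch is gated on
compare_versions from the resource_management libraries; a standalone stand-in
with the same semantics (the real helper may accept more formats):

def compare_versions(a, b):
    pa = [int(x) for x in a.split(".")]
    pb = [int(x) for x in b.split(".")]
    return (pa > pb) - (pa < pb)  # -1, 0 or 1, like the old cmp()

assert compare_versions("2.0.6", "2.2") < 0   # Pig-on-Tez branch skipped
assert compare_versions("2.2", "2.2") == 0    # boundary case still takes it (>= 0)
assert compare_versions("2.2.1", "2.2") > 0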

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/configuration/sqoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/configuration/sqoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/configuration/sqoop-env.xml
deleted file mode 100644
index 9a740be..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/configuration/sqoop-env.xml
+++ /dev/null
@@ -1,54 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <!-- sqoop-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the jinja template for sqoop-env.sh file</description>
-    <value>
-# Set Hadoop-specific environment variables here.
-
-#Set path to where bin/hadoop is available
-#Set path to where bin/hadoop is available
-export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-
-#set the path to where bin/hbase is available
-export HBASE_HOME=${HBASE_HOME:-{{hbase_home}}}
-
-#Set the path to where bin/hive is available
-export HIVE_HOME=${HIVE_HOME:-{{hive_home}}}
-
-#Set the path to where the zookeeper config dir is
-export ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}
-
-# add libthrift in hive to sqoop class path first so hive imports work
-export SQOOP_USER_CLASSPATH="`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}"
-    </value>
-  </property>
-  <property>
-    <name>sqoop_user</name>
-    <description>User to run Sqoop as</description>
-    <property-type>USER</property-type>
-    <value>sqoop</value>
-  </property>
-</configuration>
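
Note the two layers of defaulting in the deleted template (the common-services
copy is identical): Ambari renders {{hadoop_home}} and friends through
InlineTemplate at deploy time, while the shell-level ${VAR:-default} still lets
an operator override each path at runtime. A plain jinja2 analogue of the
render step (jinja2 here is an assumption; Ambari bundles its own templating):

from jinja2 import Template

line = "export HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}"
print(Template(line).render(hadoop_home="/usr/lib/hadoop"))
# -> export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}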

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/metainfo.xml
index 1f4a90b..1a9989e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/metainfo.xml
@@ -20,73 +20,7 @@
   <services>
     <service>
       <name>SQOOP</name>
-      <displayName>Sqoop</displayName>
-      <comment>Tool for transferring bulk data between Apache Hadoop and
-        structured data stores such as relational databases
-      </comment>
-      <version>1.4.4.2.0</version>
-
-      <components>
-        <component>
-          <name>SQOOP</name>
-          <displayName>Sqoop</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/sqoop_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>env</type>
-              <fileName>sqoop-env.sh</fileName>
-              <dictionaryName>sqoop-env</dictionaryName>
-            </configFile>
-          </configFiles>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>sqoop</name>
-            </package>
-            <package>
-              <name>mysql-connector-java</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>HDFS</service>
-      </requiredServices>
-      
-      <configuration-dependencies>
-        <config-type>sqoop-env</config-type>
-      </configuration-dependencies>
+      <extends>common-services/SQOOP/1.4.4.2.0</extends>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/__init__.py
deleted file mode 100644
index 5561e10..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/params.py
deleted file mode 100644
index e8a3ac7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/params.py
+++ /dev/null
@@ -1,53 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
-from resource_management import *
-
-config = Script.get_config()
-
-hdp_stack_version = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(hdp_stack_version)
-
-#hadoop params
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
-  sqoop_conf_dir = '/etc/sqoop/conf'
-  sqoop_lib = '/usr/hdp/current/sqoop-client/lib'
-  hadoop_home = '/usr/hdp/current/hbase-client'
-  hbase_home = '/usr/hdp/current/hbase-client'
-  hive_home = '/usr/hdp/current/hive-client'
-  sqoop_bin_dir = '/usr/hdp/current/sqoop-client/bin/'
-else:
-  sqoop_conf_dir = "/usr/lib/sqoop/conf"
-  sqoop_lib = "/usr/lib/sqoop/lib"
-  hadoop_home = '/usr/lib/hadoop'
-  hbase_home = "/usr/lib/hbase"
-  hive_home = "/usr/lib/hive"
-  sqoop_bin_dir = "/usr/bin"
-
-zoo_conf_dir = "/etc/zookeeper"
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-user_group = config['configurations']['cluster-env']['user_group']
-sqoop_env_sh_template = config['configurations']['sqoop-env']['content']
-
-sqoop_user = config['configurations']['sqoop-env']['sqoop_user']
-
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
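
get_kinit_path() probes an explicit directory list because kinit lives in
different places across distributions (/usr/kerberos/bin on older RHEL,
/usr/bin elsewhere). A minimal stand-in with the same contract; the real helper
in resource_management may differ in its fallback behavior:

import os

def get_kinit_path(search_dirs):
    for d in search_dirs:
        candidate = os.path.join(d, "kinit")
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    return "kinit"  # assumed fallback: rely on $PATH

print(get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"]))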

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/service_check.py
deleted file mode 100644
index bcd236c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/service_check.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-
-from resource_management import *
-
-
-class SqoopServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    if params.security_enabled:
-      Execute(format("{kinit_path_local}  -kt {smoke_user_keytab} {smokeuser}"),
-              user = params.smokeuser,
-      )
-    Execute("sqoop version",
-            user = params.smokeuser,
-            path = params.sqoop_bin_dir,
-            logoutput = True
-    )
-
-if __name__ == "__main__":
-  SqoopServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/sqoop.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/sqoop.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/sqoop.py
deleted file mode 100644
index 99ad575..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/sqoop.py
+++ /dev/null
@@ -1,51 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import os
-
-def sqoop(type=None):
-  import params
-  Link(params.sqoop_lib + "/mysql-connector-java.jar",
-       to = '/usr/share/java/mysql-connector-java.jar'
-  ) 
-  Directory(params.sqoop_conf_dir,
-            owner = params.sqoop_user,
-            group = params.user_group,
-            recursive = True
-  )
-  File(format("{sqoop_conf_dir}/sqoop-env.sh"),
-    owner=params.sqoop_user,
-    group = params.user_group,
-    content=InlineTemplate(params.sqoop_env_sh_template)
-  )
-  update_config_permissions(["sqoop-env-template.sh",
-                             "sqoop-site-template.xml",
-                             "sqoop-site.xml"])
-  pass
-
-def update_config_permissions(names):
-  import params
-  for filename in names:
-    full_filename = os.path.join(params.sqoop_conf_dir, filename)
-    File(full_filename,
-          owner = params.sqoop_user,
-          group = params.user_group,
-          only_if = format("test -e {full_filename}")
-    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/sqoop_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/sqoop_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/sqoop_client.py
deleted file mode 100644
index 6829557..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/sqoop_client.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from sqoop import sqoop
-
-
-class SqoopClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    sqoop(type='client')
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  SqoopClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py b/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py
index 0ed0364..2227708 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py
@@ -22,12 +22,16 @@ from stacks.utils.RMFTestCase import *
 import json
 
 class TestPigClient(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "PIG/0.12.0.2.0/package"
+  STACK_VERSION = "2.0.6"
 
   def test_configure_default(self):
-    self.executeScript("2.0.6/services/PIG/package/scripts/pig_client.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/pig_client.py",
                        classname = "PigClient",
                        command = "configure",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assertResourceCalled('Directory', '/etc/pig/conf',
@@ -57,10 +61,12 @@ class TestPigClient(RMFTestCase):
 
 
   def test_configure_secured(self):
-    self.executeScript("2.0.6/services/PIG/package/scripts/pig_client.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/pig_client.py",
                        classname = "PigClient",
                        command = "configure",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
     self.assertResourceCalled('Directory', '/etc/pig/conf',
@@ -95,10 +101,12 @@ class TestPigClient(RMFTestCase):
 
     default_json['hostLevelParams']['stack_version'] = '2.2'
 
-    self.executeScript("2.0.6/services/PIG/package/scripts/pig_client.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/pig_client.py",
                        classname = "PigClient",
                        command = "configure",
-                       config_dict=default_json
+                       config_dict=default_json,
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assertResourceCalled('Directory', '/etc/pig/conf',


[36/37] ambari git commit: AMBARI-8747: Common Services: Refactor HDP-2.0.6 PIG, SQOOP services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_service_check.py
index 2521636..fbd6efc 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_service_check.py
@@ -21,12 +21,16 @@ limitations under the License.
 from stacks.utils.RMFTestCase import *
 
 class TestPigServiceCheck(RMFTestCase):
-
+  COMMON_SERVICES_PACKAGE_DIR = "PIG/0.12.0.2.0/package"
+  STACK_VERSION = "2.0.6"
+  
   def test_configure_default(self):
-    self.executeScript("2.0.6/services/PIG/package/scripts/service_check.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
                        classname = "PigServiceCheck",
                        command = "service_check",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('ExecuteHadoop', 'dfs -rmr pigsmoke.out passwd; hadoop --config /etc/hadoop/conf dfs -put /etc/passwd passwd ',
       try_sleep = 5,
@@ -59,10 +63,12 @@ class TestPigServiceCheck(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_configure_secured(self):
-    self.executeScript("2.0.6/services/PIG/package/scripts/service_check.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
                        classname = "PigServiceCheck",
                        command = "service_check",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
     self.assertResourceCalled('ExecuteHadoop', 'dfs -rmr pigsmoke.out passwd; hadoop --config /etc/hadoop/conf dfs -put /etc/passwd passwd ',

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_service_check.py
index 879a9e6..8b8debe 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_service_check.py
@@ -21,12 +21,16 @@ from mock.mock import MagicMock, call, patch
 from stacks.utils.RMFTestCase import *
 
 class TestSqoopServiceCheck(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "SQOOP/1.4.4.2.0/package"
+  STACK_VERSION = "2.0.6"
 
   def test_service_check_secured(self):
-    self.executeScript("2.0.6/services/SQOOP/package/scripts/service_check.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
                        classname = "SqoopServiceCheck",
                        command = "service_check",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', '/usr/bin/kinit  -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa',
                               user = 'ambari-qa'
@@ -38,10 +42,12 @@ class TestSqoopServiceCheck(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_service_check_default(self):
-    self.executeScript("2.0.6/services/SQOOP/package/scripts/service_check.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
                        classname = "SqoopServiceCheck",
                        command = "service_check",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', 'sqoop version',
                               logoutput = True,

http://git-wip-us.apache.org/repos/asf/ambari/blob/191232e2/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py b/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py
index 76948f7..1fe98db 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py
@@ -21,12 +21,16 @@ from mock.mock import MagicMock, call, patch
 from stacks.utils.RMFTestCase import *
 
 class TestSqoop(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "SQOOP/1.4.4.2.0/package"
+  STACK_VERSION = "2.0.6"
 
   def test_configure_default(self):
-    self.executeScript("2.0.6/services/SQOOP/package/scripts/sqoop_client.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/sqoop_client.py",
                        classname = "SqoopClient",
                        command = "configure",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Link', '/usr/lib/sqoop/lib/mysql-connector-java.jar',
                               to = '/usr/share/java/mysql-connector-java.jar',)
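
Every test in this commit gets the same three-line treatment: the script path
moves from the stack tree to COMMON_SERVICES_PACKAGE_DIR, and executeScript()
additionally receives the stack version plus target =
RMFTestCase.TARGET_COMMON_SERVICES so the harness resolves scripts against
common-services. A hypothetical extra case following the same shape:

from stacks.utils.RMFTestCase import *

class TestSqoop(RMFTestCase):
  COMMON_SERVICES_PACKAGE_DIR = "SQOOP/1.4.4.2.0/package"
  STACK_VERSION = "2.0.6"

  def test_configure_secured(self):  # hypothetical companion to the test above
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/sqoop_client.py",
                       classname = "SqoopClient",
                       command = "configure",
                       config_file = "secured.json",
                       hdp_stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES)
    self.assertResourceCalled('Link', '/usr/lib/sqoop/lib/mysql-connector-java.jar',
                              to = '/usr/share/java/mysql-connector-java.jar')
    # ...remaining resource assertions as in the default case...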


[17/37] ambari git commit: AMBARI-8745: Common Services: Refactor HDP2.0.6 FLUME, GANGLIA, HBASE services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/rrd.py.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/rrd.py.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/rrd.py.j2
deleted file mode 100644
index 65d70e2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/rrd.py.j2
+++ /dev/null
@@ -1,361 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-# NOTE: This script is executed by Python 2.4 on Centos 5. 
-# Make sure your changes are compatible.
-
-import cgi
-import glob
-import os
-import re
-import rrdtool
-import sys
-import time
-import urlparse
-
-# place this script in /var/www/cgi-bin of the Ganglia collector
-# requires 'yum install rrdtool-python' on the Ganglia collector
-'''
-  Loads rrd file info
-'''
-def loadRRDData(file, cf, start, end, resolution):
-  args = [file, cf, "--daemon", "unix:{{ganglia_runtime_dir}}/rrdcached.limited.sock"]
-
-  if start is not None:
-    args.extend(["-s", start])
-  else:
-    args.extend(["-s", "now-10m"])
-
-  if end is not None:
-    args.extend(["-e", end])
-
-  if resolution is not None:
-    args.extend(["-r", resolution])
-
-  return rrdtool.fetch(args)
-
-'''
-  Collects metrics across several matching filenames.
-'''
-def collectStatMetrics(clusterName, hostName, metricName, files, cf, start, end, resolution):
-  if clusterName[0] is not '/':
-    clusterName.insert(0, '/')
-
-  metricParts = metricName.split('.')
-
-  # already know there's at least one
-  metricStat = metricParts[-1]
-  metricName = '.'.join(metricParts[:-1])
-
-  isRate = False
-  if len(metricParts) > 1 and metricParts[-2] == '_rate':
-    isRate = True
-    metricName = '.'.join(metricParts[:-2])
-
-  pattern = re.compile(metricName + '\.rrd$')
-  matchedFiles = filter(pattern.match, files)
-
-  parentPath = os.path.join(*clusterName)
-
-  actualFiles = []
-  for matchedFile in matchedFiles:
-    if hostName != "__SummaryInfo__":
-      osFiles = glob.glob(os.path.join(parentPath, hostName, matchedFile))
-    else:
-      osFiles = glob.glob(os.path.join(parentPath, '*', matchedFile))
-
-    for f in osFiles:
-      if -1 == f.find("__SummaryInfo__"):
-        actualFiles.append(f)
-
-  if len(actualFiles) == 0:
-    return
-
-  '''
-  [
-    {
-      "step_value": update each iteration
-      "count": increase by 1 each iteration
-      "sum": increase by value each iteration
-      "avg": update each iteration as sum/count
-      "min": update each iteration if step_value < old min OR min is missing (first time)
-      "max": update each iteration if step_value > old max OR max is missing (first time)
-    }
-  ]
-  '''
-
-  timestamp = None
-  stepsize = None
-  concreteMetricName = None
-  vals = None # values across all files
-
-  for file in actualFiles:
-    rrdMetric = loadRRDData(file, cf, start, end, resolution)
-    
-    if timestamp is None and stepsize is None and concreteMetricName is None:
-      timestamp = rrdMetric[0][0]
-      stepsize = rrdMetric[0][2]
-      
-      if not isRate:
-        suffix = metricStat
-      else:
-        suffix = '_rate.' + metricStat
-      
-      concreteMetricName = file.split(os.sep).pop().replace('rrd', suffix)
-
-    metricValues = rrdMetric[2]
-
-    if vals is None:
-      vals = [None] * len(metricValues)
-
-    i = 0
-    for tuple in metricValues:
-      if vals[i] is None:
-        vals[i] = {}
-        vals[i]['count'] = 0
-        vals[i]['_sum'] = 0.0
-        vals[i]['_avg'] = 0.0
-        vals[i]['_min'] = 999999999999.99
-        vals[i]['_max'] = 0.0
-
-      rawValue = tuple[0]
-      vals[i]['step_value'] = rawValue
-      if rawValue is None:
-        i += 1
-        continue
-
-      if isRate:
-        if 0 == i:
-          rawValue = 0.0
-        elif vals[i-1]['step_value'] is None:
-          rawValue = 0.0
-        else:
-          rawValue = (rawValue - vals[i-1]['step_value']) / stepsize
-      
-      vals[i]['count'] += 1 
-      vals[i]['_sum'] += rawValue
-
-      vals[i]['_avg'] = vals[i]['_sum']/vals[i]['count']
-
-      if rawValue < vals[i]['_min']:
-        vals[i]['_min'] = rawValue
-
-      if rawValue > vals[i]['_max']:
-        vals[i]['_max'] = rawValue
-      
-      i += 1
-
-  sys.stdout.write("sum\n")
-  sys.stdout.write(clusterName[len(clusterName)-1] + "\n")
-  sys.stdout.write(hostName + "\n")
-  sys.stdout.write(concreteMetricName + "\n")
-  sys.stdout.write(str(timestamp) + "\n")
-  sys.stdout.write(str(stepsize) + "\n")
-
-  for val in vals:
-    if val['step_value'] is None:
-      sys.stdout.write("[~n]")
-    else:
-      sys.stdout.write(str(val[metricStat]))
-    sys.stdout.write("\n")
-
-  sys.stdout.write("[~EOM]\n")
-
-  return
-
-def printMetric(clusterName, hostName, metricName, file, cf, start, end,
-                resolution, pointInTime):
-  if clusterName.endswith("rrds"):
-    clusterName = ""
- 
-  rrdMetric = loadRRDData(file, cf, start, end, resolution)
-
-  # ds_name
-  sys.stdout.write(rrdMetric[1][0])
-  sys.stdout.write("\n")
-
-  sys.stdout.write(clusterName)
-  sys.stdout.write("\n")
-  sys.stdout.write(hostName)
-  sys.stdout.write("\n")
-  sys.stdout.write(metricName)
-  sys.stdout.write("\n")
-
-  # write time
-  sys.stdout.write(str(rrdMetric[0][0]))
-  sys.stdout.write("\n")
-  # write step
-  sys.stdout.write(str(rrdMetric[0][2]))
-  sys.stdout.write("\n")
-
-  if not pointInTime:
-    valueCount = 0
-    lastValue = None
-
-    for tuple in rrdMetric[2]:
-
-      thisValue = tuple[0]
-
-      if valueCount > 0 and thisValue == lastValue:
-        valueCount += 1
-      else:
-        if valueCount > 1:
-          sys.stdout.write("[~r]")
-          sys.stdout.write(str(valueCount))
-          sys.stdout.write("\n")
-
-        if thisValue is None:
-          sys.stdout.write("[~n]\n")
-        else:
-          sys.stdout.write(str(thisValue))
-          sys.stdout.write("\n")
-
-        valueCount = 1
-        lastValue = thisValue
-  else:
-    value = None
-    idx = -1
-    tuple = rrdMetric[2]
-    tupleLastIdx = len(tuple) * -1
-
-    while value is None and idx >= tupleLastIdx:
-      value = tuple[idx][0]
-      idx -= 1
-
-    if value is not None:
-      sys.stdout.write(str(value))
-      sys.stdout.write("\n")
-
-  sys.stdout.write("[~EOM]\n")
-  return
-
-
-def stripList(l):
-  return ([x.strip() for x in l])
-
-
-sys.stdout.write("Content-type: text/plain\n\n")
-
-# write start time
-sys.stdout.write(str(time.mktime(time.gmtime())))
-sys.stdout.write("\n")
-
-requestMethod = os.environ['REQUEST_METHOD']
-
-if requestMethod == 'POST':
-  postData = sys.stdin.readline()
-  queryString = cgi.parse_qs(postData)
-  queryString = dict((k, v[0]) for k, v in queryString.items())
-elif requestMethod == 'GET':
-  queryString = dict(cgi.parse_qsl(os.environ['QUERY_STRING']))
-
-if "m" in queryString:
-  metricParts = queryString["m"].split(",")
-else:
-  metricParts = [""]
-metricParts = stripList(metricParts)
-
-hostParts = []
-if "h" in queryString:
-  hostParts = queryString["h"].split(",")
-hostParts = stripList(hostParts)
-
-if "c" in queryString:
-  clusterParts = queryString["c"].split(",")
-else:
-  clusterParts = [""]
-clusterParts = stripList(clusterParts)
-
-if "p" in queryString:
-  rrdPath = queryString["p"]
-else:
-  rrdPath = "{{rrdcached_base_dir}}"
-
-start = None
-if "s" in queryString:
-  start = queryString["s"]
-
-end = None
-if "e" in queryString:
-  end = queryString["e"]
-
-resolution = None
-if "r" in queryString:
-  resolution = queryString["r"]
-
-if "cf" in queryString:
-  cf = queryString["cf"]
-else:
-  cf = "AVERAGE"
-
-if "pt" in queryString:
-  pointInTime = True
-else:
-  pointInTime = False
-
-
-def _walk(*args, **kwargs):
-  for root, dirs, files in os.walk(*args, **kwargs):
-    for dir in dirs:
-      qualified_dir = os.path.join(root, dir)
-      if os.path.islink(qualified_dir):
-        for x in os.walk(qualified_dir, **kwargs):
-          yield x
-    yield (root, dirs, files)
-
-
-for cluster in clusterParts:
-  for path, dirs, files in _walk(os.path.join(rrdPath,cluster)):
-    pathParts = path.split("/")
-    # Process only paths that contain files. If no host parameter was passed,
-    # process all host folders plus the summary info; otherwise only that host's folder.
-    if len(files) > 0 and (len(hostParts) == 0 or pathParts[-1] in hostParts):
-      for metric in metricParts:
-        file = metric + ".rrd"
-        fileFullPath = os.path.join(path, file)
-        if os.path.exists(fileFullPath):
-          #Exact name of metric
-          printMetric(pathParts[-2], pathParts[-1], file[:-4],
-                      os.path.join(path, file), cf, start, end, resolution,
-                      pointInTime)
-        else:
-          need_stats = False
-          parts = metric.split(".")
-          if len(parts) > 0 and parts[-1] in ['_min', '_max', '_avg', '_sum']:
-              need_stats = True
-
-          if need_stats and not pointInTime:
-            collectStatMetrics(pathParts[:-1], pathParts[-1], metric, files, cf, start, end, resolution)
-          else:
-            #Regex as metric name
-            metricRegex = metric + '\.rrd$'
-            p = re.compile(metricRegex)
-            matchedFiles = filter(p.match, files)
-            for matchedFile in matchedFiles:
-              printMetric(pathParts[-2], pathParts[-1], matchedFile[:-4],
-                         os.path.join(path, matchedFile), cf, start, end,
-                         resolution, pointInTime)
-
-sys.stdout.write("[~EOF]\n")
-# write end time
-sys.stdout.write(str(time.mktime(time.gmtime())))
-sys.stdout.write("\n")
-
-sys.stdout.flush()
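
A minimal client sketch for the line-oriented feed the deleted script serves.
The endpoint URL is an assumption; the parameter names mirror the script's
own query-string handling (m = metrics, h = hosts, c = clusters,
cf = consolidation function, s/e/r = start/end/resolution, pt = point in time):

    # Hedged sketch, Python 2 to match the script above; host and CGI path
    # are hypothetical and depend on the Ganglia web deployment.
    import urllib
    import urllib2

    params = urllib.urlencode({
        "c": "MyCluster",              # hypothetical cluster name
        "h": "host1.example.com",      # hypothetical host
        "m": "load_one,cpu_user",      # comma-separated metric names
        "cf": "AVERAGE",
        "s": "now-10m",
    })
    response = urllib2.urlopen("http://ganglia.example.com/cgi-bin/rrd.py?" + params)
    for line in response:
        line = line.rstrip("\n")
        if line == "[~EOF]":           # end of all metric blocks
            break
        print(line)                    # header lines, values, "[~n]" for nulls, "[~EOM]" per metric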

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/alerts.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/alerts.json
deleted file mode 100644
index fa911e1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/alerts.json
+++ /dev/null
@@ -1,124 +0,0 @@
-{
-  "HBASE": {
-    "service": [
-      {
-        "name": "hbase_regionserver_process_percent",
-        "label": "Percent RegionServers Available",
-        "description": "This service-level alert is triggered if the configured percentage of RegionServer processes cannot be determined to be up and listening on the network for the configured warning and critical thresholds. It aggregates the results of RegionServer process down checks.",
-        "interval": 1,
-        "scope": "SERVICE",
-        "enabled": true,
-        "source": {
-          "type": "AGGREGATE",
-          "alert_name": "hbase_regionserver_process",
-          "reporting": {
-            "ok": {
-              "text": "affected: [{1}], total: [{0}]"
-            },
-            "warning": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 0.1
-            },
-            "critical": {
-              "text": "affected: [{1}], total: [{0}]",
-              "value": 0.3
-            }
-          }
-        }
-      }    
-    ],
-    "HBASE_MASTER": [
-      {
-        "name": "hbase_master_process",
-        "label": "HBase Master Process",
-        "description": "This alert is triggered if the HBase master processes cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
-        "interval": 1,
-        "scope": "ANY",
-        "source": {
-          "type": "PORT",
-          "uri": "{{hbase-site/hbase.master.port}}",
-          "default_port": 60000,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      },
-      {
-        "name": "hbase_master_cpu",
-        "label": "HBase Maser CPU Utilization",
-        "description": "This host-level alert is triggered if CPU utilization of the HBase Master exceeds certain warning and critical thresholds. It checks the HBase Master JMX Servlet for the SystemCPULoad property. The threshold values are in percent.",
-        "interval": 5,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "METRIC",
-          "uri": {
-            "http": "{{hbase-site/hbase.master.info.port}}",
-            "https": "{{hbase-site/hbase.master.info.port}}",
-            "https_property": "{{cluster-env/security_enabled}}",
-            "https_property_value": "true",
-            "default_port": 60010
-          },
-          "reporting": {
-            "ok": {
-              "text": "{1} CPU, load {0:.1%}"
-            },
-            "warning": {
-              "text": "{1} CPU, load {0:.1%}",
-              "value": 200
-            },
-            "critical": {
-              "text": "{1} CPU, load {0:.1%}",
-              "value": 250
-            },
-            "units" : "%"
-          },
-          "jmx": {
-            "property_list": [
-              "java.lang:type=OperatingSystem/SystemCpuLoad",
-              "java.lang:type=OperatingSystem/AvailableProcessors"
-            ],
-            "value": "{0} * 100"
-          }
-        }
-      }
-    ],
-    "HBASE_REGIONSERVER": [
-      {
-        "name": "hbase_regionserver_process",
-        "label": "HBase RegionServer Process",
-        "description": "This host-level alert is triggered if the RegionServer processes cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
-        "interval": 1,
-        "scope": "HOST",
-        "source": {
-          "type": "PORT",
-          "uri": "{{hbase-site/hbase.regionserver.info.port}}",
-          "default_port": 60030,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      }
-    ]
-  }
-}
\ No newline at end of file
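
The AGGREGATE source above derives the service-level state from the fraction
of failed hbase_regionserver_process checks. A small sketch of that ratio
logic, assuming greater-or-equal threshold semantics; the function and the
OK/WARNING/CRITICAL labels are illustrative, not Ambari's alert framework:

    # Hedged sketch: thresholds 0.1/0.3 come from the alert definition above.
    def aggregate_state(affected, total, warning=0.1, critical=0.3):
        if total == 0:
            return "UNKNOWN", "affected: [0], total: [0]"
        ratio = float(affected) / total
        text = "affected: [{1}], total: [{0}]".format(total, affected)
        if ratio >= critical:
            return "CRITICAL", text
        if ratio >= warning:
            return "WARNING", text
        return "OK", text

    print(aggregate_state(2, 10))   # ('WARNING', 'affected: [2], total: [10]')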

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-env.xml
deleted file mode 100644
index 231baa6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-env.xml
+++ /dev/null
@@ -1,135 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hbase_log_dir</name>
-    <value>/var/log/hbase</value>
-    <description>Log Directories for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_pid_dir</name>
-    <value>/var/run/hbase</value>
-    <description>Pid Directory for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_regionserver_heapsize</name>
-    <value>1024</value>
-    <description>HBase RegionServer Heap Size.</description>
-  </property>
-  <property>
-    <name>hbase_regionserver_xmn_max</name>
-    <value>512</value>
-    <description>HBase RegionServer maximum value for minimum heap size.</description>
-  </property>
-  <property>
-    <name>hbase_regionserver_xmn_ratio</name>
-    <value>0.2</value>
-    <description>HBase RegionServer minimum heap size is calculated as a percentage of max heap size.</description>
-  </property>
-  <property>
-    <name>hbase_master_heapsize</name>
-    <value>1024</value>
-    <description>HBase Master Heap Size</description>
-  </property>
-   <property>
-    <name>hbase_user</name>
-    <value>hbase</value>
-    <property-type>USER</property-type>
-    <description>HBase User Name.</description>
-  </property>
-
-  <!-- hbase-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the jinja template for hbase-env.sh file</description>
-    <value>
-# Set environment variables here.
-
-# The java implementation to use. Java 1.6 required.
-export JAVA_HOME={{java64_home}}
-
-# HBase Configuration directory
-export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}
-
-# Extra Java CLASSPATH elements. Optional.
-export HBASE_CLASSPATH=${HBASE_CLASSPATH}
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-# export HBASE_HEAPSIZE=1000
-
-# Extra Java runtime options.
-# Below are what we set by default. May only work with SUN JVM.
-# For more on why as well as other possible settings,
-# see http://wiki.apache.org/hadoop/PerformanceTuning
-export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
-# Uncomment below to enable java garbage collection logging.
-# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
-
-# Uncomment and adjust to enable JMX exporting
-# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
-# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
-#
-# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
-# If you want to configure BucketCache, specify '-XX:MaxDirectMemorySize=' with a proper direct memory size
-# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
-# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
-
-# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
-export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
-
-# Extra ssh options. Empty by default.
-# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
-
-# Where log files are stored. $HBASE_HOME/logs by default.
-export HBASE_LOG_DIR={{log_dir}}
-
-# A string representing this instance of hbase. $USER by default.
-# export HBASE_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes. See 'man nice'.
-# export HBASE_NICENESS=10
-
-# The directory where pid files are stored. /tmp by default.
-export HBASE_PID_DIR={{pid_dir}}
-
-# Seconds to sleep between slave commands. Unset by default. This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HBASE_SLAVE_SLEEP=0.1
-
-# Tell HBase whether it should manage its own instance of ZooKeeper or not.
-export HBASE_MANAGES_ZK=false
-
-{% if security_enabled %}
-export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}}"
-export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}}"
-export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}}"
-{% else %}
-export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log"
-export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}"
-export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"
-{% endif %}
-    </value>
-  </property>
-
-</configuration>
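
The content property above is a Jinja2 template: Ambari fills the {{...}}
placeholders and {% if %} blocks from the service's params at deploy time.
A minimal rendering sketch with made-up values (requires the jinja2 package):

    # Hedged sketch; the parameter values are illustrative, not what Ambari
    # would actually compute for a given cluster.
    from jinja2 import Template

    fragment = Template(
        "export HBASE_LOG_DIR={{log_dir}}\n"
        "{% if security_enabled %}"
        "export HBASE_OPTS=\"$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}\"\n"
        "{% endif %}"
    )
    print(fragment.render(log_dir="/var/log/hbase",
                          security_enabled=True,
                          client_jaas_config_file="/etc/hbase/conf/hbase_client_jaas.conf"))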

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-log4j.xml
deleted file mode 100644
index 57b3845..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-log4j.xml
+++ /dev/null
@@ -1,143 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>content</name>
-    <description>Custom log4j.properties</description>
-    <value>
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Define some default values that can be overridden by system properties
-hbase.root.logger=INFO,console
-hbase.security.logger=INFO,console
-hbase.log.dir=.
-hbase.log.file=hbase.log
-
-# Define the root logger to the system property "hbase.root.logger".
-log4j.rootLogger=${hbase.root.logger}
-
-# Logging Threshold
-log4j.threshold=ALL
-
-#
-# Daily Rolling File Appender
-#
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
-
-# Rolling File Appender properties
-hbase.log.maxfilesize=256MB
-hbase.log.maxbackupindex=20
-
-# Rolling File Appender
-log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-log4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}
-
-log4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}
-log4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}
-
-log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
-
-#
-# Security audit appender
-#
-hbase.security.log.file=SecurityAuth.audit
-hbase.security.log.maxfilesize=256MB
-hbase.security.log.maxbackupindex=20
-log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
-log4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}
-log4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}
-log4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}
-log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.category.SecurityLogger=${hbase.security.logger}
-log4j.additivity.SecurityLogger=false
-#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE
-
-#
-# Null Appender
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n
-
-# Custom Logging levels
-
-log4j.logger.org.apache.zookeeper=INFO
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.hbase=DEBUG
-# Make these two classes INFO-level. Make them DEBUG to see more zk debug.
-log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO
-log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO
-#log4j.logger.org.apache.hadoop.dfs=DEBUG
-# Set this class to log INFO only otherwise its OTT
-# Enable this to get detailed connection error/retry logging.
-# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE
-
-
-# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)
-#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG
-
-# Uncomment the below if you want to remove logging of client region caching'
-# and scan of .META. messages
-# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO
-# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO
-
-    </value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-policy.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-policy.xml
deleted file mode 100644
index 2f12801..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-policy.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="true">
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HRegionInterface protocol implementations (ie. 
-    clients talking to HRegionServers)
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.admin.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterInterface protocol implementation (ie. 
-    clients talking to HMaster for admin operations).
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.masterregion.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterRegionInterface protocol implementations
-    (for HRegionServers communicating with HMaster)
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-</configuration>
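
Each *.protocol.acl value above follows the Hadoop service-level ACL format:
a comma-separated user list and a comma-separated group list, separated by a
blank, with "*" meaning everyone. A hedged sketch of that check (illustrative,
not HBase's actual access-control implementation):

    def acl_allows(acl, user, user_groups):
        acl = acl.strip()
        if acl == "*":
            return True
        parts = acl.split(None, 1)
        users = set(filter(None, parts[0].split(","))) if parts else set()
        groups = set(filter(None, parts[1].split(","))) if len(parts) > 1 else set()
        return user in users or bool(groups & set(user_groups))

    print(acl_allows("alice,bob users,wheel", "carol", ["users"]))  # True, via group
    print(acl_allows("alice,bob users,wheel", "carol", ["ops"]))    # False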

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-site.xml
deleted file mode 100644
index 84900d1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-site.xml
+++ /dev/null
@@ -1,331 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_final="true">
-  <property>
-    <name>hbase.rootdir</name>
-    <value>hdfs://localhost:8020/apps/hbase/data</value>
-    <description>The directory shared by region servers and into
-    which HBase persists.  The URL should be 'fully-qualified'
-    to include the filesystem scheme.  For example, to specify the
-    HDFS directory '/hbase' where the HDFS instance's namenode is
-    running at namenode.example.org on port 9000, set this value to:
-    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
-    into /tmp.  Change this configuration else all data will be lost
-    on machine restart.
-    </description>
-  </property>
-  <property>
-    <name>hbase.cluster.distributed</name>
-    <value>true</value>
-    <description>The mode the cluster will be in. Possible values are
-      false for standalone mode and true for distributed mode.  If
-      false, startup will run all HBase and ZooKeeper daemons together
-      in the one JVM.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.port</name>
-    <value>60000</value>
-    <description>The port the HBase Master should bind to.</description>
-  </property>
-  <property>
-    <name>hbase.tmp.dir</name>
-    <value>/hadoop/hbase</value>
-    <description>Temporary directory on the local filesystem.
-    Change this setting to point to a location more permanent
-    than '/tmp' (The '/tmp' directory is often cleared on
-    machine restart).
-    </description>
-  </property>
-  <property>
-    <name>hbase.local.dir</name>
-    <value>${hbase.tmp.dir}/local</value>
-    <description>Directory on the local filesystem to be used as a local storage
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.bindAddress</name>
-    <value>0.0.0.0</value>
-    <description>The bind address for the HBase Master web UI
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.port</name>
-    <value>60010</value>
-    <description>The port for the HBase Master web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port</name>
-    <value>60030</value>
-    <description>The port for the HBase RegionServer web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.upperLimit</name>
-    <value>0.4</value>
-    <description>Maximum size of all memstores in a region server before new
-      updates are blocked and flushes are forced. Defaults to 40% of heap
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value>60</value>
-    <description>Count of RPC Listener instances spun up on RegionServers.
-    Same property is used by the Master for count of master handlers.
-    Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.majorcompaction</name>
-    <value>86400000</value>
-    <description>The time (in milliseconds) between 'major' compactions of all
-    HStoreFiles in a region.  Default: 1 day.
-    Set to 0 to disable automated major compactions.
-    </description>
-  </property>
-  
-  <property>
-    <name>hbase.regionserver.global.memstore.lowerLimit</name>
-    <value>0.38</value>
-    <description>When memstores are being forced to flush to make room in
-      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
-      This value equal to hbase.regionserver.global.memstore.upperLimit causes
-      the minimum possible flushing to occur when updates are blocked due to
-      memstore limiting.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.block.multiplier</name>
-    <value>2</value>
-    <description>Block updates if the memstore reaches hbase.hregion.memstore.block.multiplier
-    times hbase.hregion.flush.size bytes.  Useful for preventing
-    runaway memstores during spikes in update traffic.  Without an
-    upper bound, the memstore fills such that when it flushes, the
-    resultant flush files take a long time to compact or split, or,
-    worse, we OOME.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.flush.size</name>
-    <value>134217728</value>
-    <description>
-    Memstore will be flushed to disk if size of the memstore
-    exceeds this number of bytes.  Value is checked by a thread that runs
-    every hbase.server.thread.wakefrequency.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.mslab.enabled</name>
-    <value>true</value>
-    <description>
-      Enables the MemStore-Local Allocation Buffer,
-      a feature which works to prevent heap fragmentation under
-      heavy write loads. This can reduce the frequency of stop-the-world
-      GC pauses on large heaps.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value>10737418240</value>
-    <description>
-    Maximum HStoreFile size. If any one of a column families' HStoreFiles has
-    grown to exceed this value, the hosting HRegion is split in two.
-    Default: 1G.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.scanner.caching</name>
-    <value>100</value>
-    <description>Number of rows that will be fetched when calling next
-    on a scanner if it is not served from (local, client) memory. Higher
-    caching values will enable faster scanners but will eat up more memory
-    and some calls of next may take longer and longer times when the cache is empty.
-    Do not set this value such that the time between invocations is greater
-    than the scanner timeout; i.e. hbase.regionserver.lease.period
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.session.timeout</name>
-    <value>30000</value>
-    <description>ZooKeeper session timeout.
-      HBase passes this to the zk quorum as suggested maximum time for a
-      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
-      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
-      "The client sends a requested timeout, the server responds with the
-      timeout that it can give the client. " In milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.keyvalue.maxsize</name>
-    <value>10485760</value>
-    <description>Specifies the combined maximum allowed size of a KeyValue
-    instance. This is to set an upper boundary for a single entry saved in a
-    storage file. Since entries cannot be split, this helps avoid a region
-    becoming unsplittable because a single entry is too large. It seems wise
-    to set this to a fraction of the maximum region size. Setting it to zero
-    or less disables the check.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.compactionThreshold</name>
-    <value>3</value>
-    <description>
-    If more than this number of HStoreFiles in any one HStore
-    (one HStoreFile is written per flush of memstore) then a compaction
-    is run to rewrite all HStoreFiles files as one.  Larger numbers
-    put off compaction but when it runs, it takes longer to complete.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.flush.retries.number</name>
-    <value>120</value>
-    <description>
-    The number of times the region flush operation will be retried.
-    </description>
-  </property>
-  
-  <property>
-    <name>hbase.hstore.blockingStoreFiles</name>
-    <value>10</value>
-    <description>
-    If more than this number of StoreFiles in any one Store
-    (one StoreFile is written per flush of MemStore) then updates are
-    blocked for this HRegion until a compaction is completed, or
-    until hbase.hstore.blockingWaitTime has been exceeded.
-    </description>
-  </property>
-  <property>
-    <name>hfile.block.cache.size</name>
-    <value>0.40</value>
-    <description>
-        Percentage of maximum heap (-Xmx setting) to allocate to block cache
-        used by HFile/StoreFile. Default of 0.25 means allocate 25%.
-        Set to 0 to disable but it's not recommended.
-    </description>
-  </property>
-
-  <!-- Additional configuration specific to HBase security -->
-  <property>
-    <name>hbase.superuser</name>
-    <value>hbase</value>
-    <description>List of users or groups (comma-separated), who are allowed
-    full privileges, regardless of stored ACLs, across the cluster.
-    Only used when HBase security is enabled.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.security.authentication</name>
-    <value>simple</value>
-    <description>  Controls whether or not secure authentication is enabled for HBase. Possible values are 'simple'
-      (no authentication), and 'kerberos'.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.security.authorization</name>
-    <value>false</value>
-    <description>Enables HBase authorization. Set the value of this property to false to disable HBase authorization.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.region.classes</name>
-    <value></value>
-    <description>A comma-separated list of Coprocessors that are loaded by
-    default on all tables. For any override coprocessor method, these classes
-    will be called in order. After implementing your own Coprocessor, just put
-    it in HBase's classpath and add the fully qualified class name here.
-    A coprocessor can also be loaded on demand by setting HTableDescriptor.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.master.classes</name>
-    <value></value>
-    <description>A comma-separated list of
-      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
-      loaded by default on the active HMaster process. For any implemented
-      coprocessor methods, the listed classes will be called in order. After
-      implementing your own MasterObserver, just put it in HBase's classpath
-      and add the fully qualified class name here.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>2181</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    The port at which the clients will connect.
-    </description>
-  </property>
-
-  <!--
-  The following three properties are used together to create the list of
-  host:peer_port:leader_port quorum servers for ZooKeeper.
-  -->
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value>localhost</value>
-    <description>Comma separated list of servers in the ZooKeeper Quorum.
-    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
-    By default this is set to localhost for local and pseudo-distributed modes
-    of operation. For a fully-distributed setup, this should be set to a full
-    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
-    this is the list of servers which we will start/stop ZooKeeper on.
-    </description>
-  </property>
-  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
-
-  <property>
-    <name>hbase.zookeeper.useMulti</name>
-    <value>true</value>
-    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
-    This allows certain ZooKeeper operations to complete more quickly and prevents some issues
-    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
-    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
-    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
-    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.znode.parent</name>
-    <value>/hbase-unsecure</value>
-    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
-      files that are configured with a relative path will go under this node.
-      By default, all of HBase's ZooKeeper file path are configured with a
-      relative path, so they will all go under this directory unless changed.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.defaults.for.version.skip</name>
-    <value>true</value>
-    <description>Disables version verification.</description>
-  </property>
-
-  <property>
-    <name>dfs.domain.socket.path</name>
-    <value>/var/lib/hadoop-hdfs/dn_socket</value>
-    <description>Path to domain socket.</description>
-  </property>
-
-</configuration>
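
Worth noting how the memory settings above interact: with the 1024 MB
RegionServer heap from hbase-env.xml, the 0.4 memstore upper limit and the
0.40 block cache already claim 80% of the heap between them. A back-of-envelope
sketch (the 0.8 combined ceiling is HBase's startup sanity check as I
understand it; treat that constant as an assumption):

    heap_mb = 1024            # hbase_regionserver_heapsize (hbase-env.xml)
    memstore_upper = 0.4      # hbase.regionserver.global.memstore.upperLimit
    memstore_lower = 0.38     # hbase.regionserver.global.memstore.lowerLimit
    block_cache = 0.40        # hfile.block.cache.size

    print("memstore flush ceiling: %d MB" % (heap_mb * memstore_upper))  # 409 MB
    print("memstore flush floor:   %d MB" % (heap_mb * memstore_lower))  # 389 MB
    print("block cache:            %d MB" % (heap_mb * block_cache))     # 409 MB
    assert memstore_upper + block_cache <= 0.8, "combined limits exceed 80% of heap"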

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/metainfo.xml
index fd290df..194f79e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/metainfo.xml
@@ -20,125 +20,7 @@
   <services>
     <service>
       <name>HBASE</name>
-      <displayName>HBase</displayName>
-      <comment>Non-relational distributed database and centralized service for configuration management &amp;
-        synchronization
-      </comment>
-      <version>0.96.0.2.0</version>
-      <components>
-        <component>
-          <name>HBASE_MASTER</name>
-          <displayName>HBase Master</displayName>
-          <category>MASTER</category>
-          <cardinality>1+</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-                <co-locate>HBASE/HBASE_MASTER</co-locate>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/hbase_master.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/hbase_master.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-        </component>
-
-        <component>
-          <name>HBASE_REGIONSERVER</name>
-          <displayName>RegionServer</displayName>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/hbase_regionserver.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HBASE_CLIENT</name>
-          <displayName>HBase Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/hbase_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>hbase-site.xml</fileName>
-              <dictionaryName>hbase-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>hbase-env.sh</fileName>
-              <dictionaryName>hbase-env</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>xml</type>
-              <fileName>hbase-policy.xml</fileName>
-              <dictionaryName>hbase-policy</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>log4j.properties</fileName>
-              <dictionaryName>hbase-log4j</dictionaryName>
-            </configFile>            
-          </configFiles>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>hbase</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>ZOOKEEPER</service>
-        <service>HDFS</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>hbase-policy</config-type>
-        <config-type>hbase-site</config-type>
-        <config-type>hbase-env</config-type>
-        <config-type>hbase-log4j</config-type>
-      </configuration-dependencies>
-
+      <extends>common-services/HBASE/0.96.0.2.0</extends>
     </service>
   </services>
 </metainfo>
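
After this change the stack-level metainfo.xml carries only an <extends>
pointer; the full HBASE definition now lives once under common-services and
is inherited. A hedged sketch (not Ambari's actual resolver) of mapping that
pointer onto the resources tree:

    # The resources root is an assumed layout for illustration.
    import os
    import xml.etree.ElementTree as ET

    METAINFO = """<metainfo><services><service>
      <name>HBASE</name>
      <extends>common-services/HBASE/0.96.0.2.0</extends>
    </service></services></metainfo>"""

    RESOURCES_ROOT = "/var/lib/ambari-server/resources"

    service = ET.fromstring(METAINFO).find("services/service")
    parent = service.findtext("extends")
    if parent:
        print("HBASE inherits its definition from: " +
              os.path.join(RESOURCES_ROOT, parent))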


[33/37] ambari git commit: AMBARI-8746: Common Services: Refactor HDP 2.0.6 HIVE, OOZIE services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/hive-exec-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/hive-exec-log4j.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/hive-exec-log4j.xml
new file mode 100644
index 0000000..fb852f7
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/hive-exec-log4j.xml
@@ -0,0 +1,111 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom hive-exec-log4j</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+
+hive.log.threshold=ALL
+hive.root.logger=INFO,FA
+hive.log.dir=${java.io.tmpdir}/${user.name}
+hive.query.id=hadoop
+hive.log.file=${hive.query.id}.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hive.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=${hive.log.threshold}
+
+#
+# File Appender
+#
+
+log4j.appender.FA=org.apache.log4j.FileAppender
+log4j.appender.FA.File=${hive.log.dir}/${hive.log.file}
+log4j.appender.FA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+log4j.appender.FA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#custom logging levels
+#log4j.logger.xxx=DEBUG
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter
+
+
+log4j.category.DataNucleus=ERROR,FA
+log4j.category.Datastore=ERROR,FA
+log4j.category.Datastore.Schema=ERROR,FA
+log4j.category.JPOX.Datastore=ERROR,FA
+log4j.category.JPOX.Plugin=ERROR,FA
+log4j.category.JPOX.MetaData=ERROR,FA
+log4j.category.JPOX.Query=ERROR,FA
+log4j.category.JPOX.General=ERROR,FA
+log4j.category.JPOX.Enhancer=ERROR,FA
+
+
+# Silence useless ZK logs
+log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,FA
+log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,FA
+
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/hive-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/hive-log4j.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/hive-log4j.xml
new file mode 100644
index 0000000..a978ef7
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/hive-log4j.xml
@@ -0,0 +1,120 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hive.log.threshold=ALL
+hive.root.logger=INFO,DRFA
+hive.log.dir=${java.io.tmpdir}/${user.name}
+hive.log.file=hive.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hive.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=${hive.log.threshold}
+
+#
+# Daily Rolling File Appender
+#
+# Use the PidDailyerRollingFileAppend class instead if you want to use separate log files
+# for different CLI session.
+#
+# log4j.appender.DRFA=org.apache.hadoop.hive.ql.log.PidDailyRollingFileAppender
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+
+log4j.appender.DRFA.File=${hive.log.dir}/${hive.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n
+log4j.appender.console.encoding=UTF-8
+
+#custom logging levels
+#log4j.logger.xxx=DEBUG
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter
+
+
+log4j.category.DataNucleus=ERROR,DRFA
+log4j.category.Datastore=ERROR,DRFA
+log4j.category.Datastore.Schema=ERROR,DRFA
+log4j.category.JPOX.Datastore=ERROR,DRFA
+log4j.category.JPOX.Plugin=ERROR,DRFA
+log4j.category.JPOX.MetaData=ERROR,DRFA
+log4j.category.JPOX.Query=ERROR,DRFA
+log4j.category.JPOX.General=ERROR,DRFA
+log4j.category.JPOX.Enhancer=ERROR,DRFA
+
+
+# Silence useless ZK logs
+log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,DRFA
+log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,DRFA
+    </value>
+  </property>
+
+</configuration>
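
Both Hive log4j templates rely on log4j's ${property} expansion, with system
properties (e.g. -Dhive.root.logger=DEBUG,console) overriding the defaults
declared at the top of the file. A minimal sketch of that resolution order;
the resolver is illustrative, not log4j's implementation:

    import re

    defaults = {
        "hive.root.logger": "INFO,DRFA",
        "hive.log.dir": "/tmp/hive",   # stands in for ${java.io.tmpdir}/${user.name}
        "hive.log.file": "hive.log",
    }
    overrides = {"hive.root.logger": "DEBUG,console"}  # as if passed via -D

    def resolve(value):
        return re.sub(r"\$\{([^}]+)\}",
                      lambda m: overrides.get(m.group(1), defaults.get(m.group(1), m.group(0))),
                      value)

    print(resolve("${hive.log.dir}/${hive.log.file}"))  # /tmp/hive/hive.log
    print(resolve("${hive.root.logger}"))               # DEBUG,console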

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/hive-site.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/hive-site.xml
new file mode 100644
index 0000000..67be680
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/hive-site.xml
@@ -0,0 +1,329 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<configuration supports_final="true">
+
+  <property>
+    <name>hive.heapsize</name>
+    <value>1024</value>
+    <description>Hive Java heap size</description>
+  </property>
+
+  <property>
+    <name>ambari.hive.db.schema.name</name>
+    <value>hive</value>
+    <description>Database name used as the Hive Metastore</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionURL</name>
+    <value>jdbc:mysql://localhost/hive?createDatabaseIfNotExist=true</value>
+    <description>JDBC connect string for a JDBC metastore</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionDriverName</name>
+    <value>com.mysql.jdbc.Driver</value>
+    <description>Driver class name for a JDBC metastore</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionUserName</name>
+    <value>hive</value>
+    <description>username to use against metastore database</description>
+  </property>
+
+  <property require-input="true">
+    <name>javax.jdo.option.ConnectionPassword</name>
+    <value> </value>
+    <property-type>PASSWORD</property-type>
+    <description>password to use against metastore database</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.warehouse.dir</name>
+    <value>/apps/hive/warehouse</value>
+    <description>location of default database for the warehouse</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.sasl.enabled</name>
+    <value>false</value>
+    <description>If true, the metastore thrift interface will be secured with SASL.
+     Clients must authenticate with Kerberos.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.cache.pinobjtypes</name>
+    <value>Table,Database,Type,FieldSchema,Order</value>
+    <description>List of comma separated metastore object types that should be pinned in the cache</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.uris</name>
+    <value>thrift://localhost:9083</value>
+    <description>URI for client to contact metastore server</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.pre.event.listeners</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener</value>
+    <description>Pre-event listener classes to be loaded on the metastore side to run code
+      whenever databases, tables, and partitions are created, altered, or dropped.
+      Set to org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener
+      if metastore-side authorization is desired.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.client.socket.timeout</name>
+    <value>60</value>
+    <description>MetaStore Client socket timeout in seconds</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.execute.setugi</name>
+    <value>true</value>
+    <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
+  </property>
+
+  <property>
+    <name>hive.security.authorization.enabled</name>
+    <value>false</value>
+    <description>enable or disable the hive client authorization</description>
+  </property>
+
+  <property>
+    <name>hive.security.authorization.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
+    <description>The Hive client authorization manager class name.
+    The user-defined authorization class should implement the interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.</description>
+  </property>
+
+  <property>
+    <name>hive.security.metastore.authorization.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
+    <description>The authorization manager class name to be used in the metastore for authorization. The user-defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.  </description>
+  </property>
+
+  <property>
+    <name>hive.security.authenticator.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
+    <description>Hive client authenticator manager class name. The user-defined authenticator class should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.  </description>
+  </property>
+
+  <property>
+    <name>hive.server2.enable.doAs</name>
+    <value>true</value>
+    <description>Impersonate the connected user. By default HiveServer2 performs the query processing as the user who
+      submitted the query. But if the parameter is set to false, the query will run as the user that the hiveserver2
+      process runs as.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.server2.enable.impersonation</name>
+    <description>Enable user impersonation for HiveServer2</description>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>hive.server2.authentication</name>
+    <description>Authentication mode, default NONE. Options are NONE, NOSASL, KERBEROS, LDAP, PAM and CUSTOM</description>
+    <value>NONE</value>
+  </property>
+
+  <property>
+    <name>fs.hdfs.impl.disable.cache</name>
+    <value>true</value>
+    <description>Disable HDFS filesystem cache.</description>
+  </property>
+
+  <property>
+    <name>fs.file.impl.disable.cache</name>
+    <value>true</value>
+    <description>Disable local filesystem cache.</description>
+  </property>
+
+  <property>
+    <name>hive.enforce.bucketing</name>
+    <value>true</value>
+    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
+  </property>
+
+  <property>
+    <name>hive.enforce.sorting</name>
+    <value>true</value>
+    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
+  </property>
+
+  <property>
+    <name>hive.map.aggr</name>
+    <value>true</value>
+    <description>Whether to use map-side aggregation in Hive Group By queries.</description>
+  </property>
+
+  <property>
+    <name>hive.optimize.bucketmapjoin</name>
+    <value>true</value>
+    <description>If the tables being joined are bucketized on the join columns, and the number of buckets in one table
+      is a multiple of the number of buckets in the other table, the buckets can be joined with each other by setting
+      this parameter as true.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
+    <value>false</value>
+    <description> If the tables being joined are sorted and bucketized on the join columns, and they have the same number
+    of buckets, a sort-merge join can be performed by setting this parameter as true.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.mapred.reduce.tasks.speculative.execution</name>
+    <value>false</value>
+    <description>Whether speculative execution for reducers should be turned on.</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join</name>
+    <value>true</value>
+    <description>Whether Hive enables the optimization of converting a common
+      join into a mapjoin based on the input file size.</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.sortmerge.join</name>
+    <value>true</value>
+    <description>Whether the join will be automatically converted to a sort-merge join if the joined tables pass
+      the criteria for a sort-merge join.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
+    <value>true</value>
+    <description>Required to enable the conversion of an SMB (Sort-Merge-Bucket) join to a map-join SMB.</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join.noconditionaltask</name>
+    <value>true</value>
+    <description>Whether Hive enables the optimization of converting a common join into a mapjoin based on the input file
+      size. If this parameter is on, and the sum of sizes for n-1 of the tables/partitions in an n-way join is smaller than the
+      specified size, the join is directly converted to a mapjoin (there is no conditional task).
+    </description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join.noconditionaltask.size</name>
+    <value>1000000000</value>
+    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
+      is on, and the sum of sizes for n-1 of the tables/partitions in an n-way join is smaller than this size, the join is directly
+      converted to a mapjoin (there is no conditional task). The upstream Hive default is 10MB.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.reducededuplication.min.reducer</name>
+    <value>1</value>
+    <description>Reduce deduplication merges two RSs (reduce sink operators) by moving the key/parts/reducer-num of the child RS to the parent RS.
+      That means if the reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can result in a very slow, single-MR job.
+      The optimization will be disabled if the number of reducers is less than the specified value.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.mapjoin.mapreduce</name>
+    <value>true</value>
+    <description>If hive.auto.convert.join is off, this parameter does not take
+      effect. If it is on, and there are map-join jobs followed by a map-reduce
+      job (e.g. a group by), each map-only job is merged with the following
+      map-reduce job.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.mapjoin.bucket.cache.size</name>
+    <value>10000</value>
+    <description>
+      How many values in each key of the map-joined table should be cached in memory.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.vectorized.execution.enabled</name>
+    <value>false</value>
+    <description>This flag controls the vectorized mode of query execution as documented in HIVE-4160 (as of Hive 0.13.0)
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.reducededuplication</name>
+    <value>true</value>
+    <description>Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.index.filter</name>
+    <value>true</value>
+    <description>
+    Whether to enable automatic use of indexes
+    </description>
+  </property>
+
+  <property>
+    <name>hive.server2.thrift.port</name>
+    <value>10000</value>
+    <description>
+      TCP port number to listen on, default 10000.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.server2.support.dynamic.service.discovery</name>
+    <value>false</value>
+    <description>Whether HiveServer2 supports dynamic service discovery for its
+      clients. To support this, each instance of HiveServer2 currently uses
+      ZooKeeper to register itself when it is brought up. JDBC/ODBC clients
+      should use the ZooKeeper ensemble: hive.zookeeper.quorum in their
+      connection string.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.server2.zookeeper.namespace</name>
+    <value>hiveserver2</value>
+    <description>The parent node in ZooKeeper used by HiveServer2 when
+      supporting dynamic service discovery.
+    </description>
+  </property>
+  
+</configuration>
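
With hive.server2.thrift.port left at 10000 and hive.server2.authentication
at NONE, a client can reach HiveServer2 over plain Thrift. A minimal sketch
assuming the PyHive library and a server on localhost (both assumptions, not
part of this commit):

    from pyhive import hive  # pip install pyhive

    # Connect to HiveServer2 using the port and auth mode configured above.
    conn = hive.connect(host='localhost', port=10000,
                        username='hive', auth='NONE')
    cursor = conn.cursor()
    cursor.execute('SHOW DATABASES')
    print(cursor.fetchall())
    conn.close()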

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/webhcat-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/webhcat-env.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/webhcat-env.xml
new file mode 100644
index 0000000..14a473f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/webhcat-env.xml
@@ -0,0 +1,54 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- webhcat-env.sh -->
+  <property>
+    <name>content</name>
+    <description>webhcat-env.sh content</description>
+    <value>
+# The file containing the running pid
+PID_FILE={{webhcat_pid_file}}
+
+TEMPLETON_LOG_DIR={{templeton_log_dir}}/
+
+WEBHCAT_LOG_DIR={{templeton_log_dir}}/
+
+# The console error log
+ERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log
+
+# The console log
+CONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log
+
+#TEMPLETON_JAR=templeton_jar_name
+
+#HADOOP_PREFIX=hadoop_prefix
+
+#HCAT_PREFIX=hive_prefix
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+export HADOOP_HOME={{hadoop_home}}
+    </value>
+  </property>
+  
+</configuration>
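
The {{...}} markers in the content block above are template placeholders
that Ambari fills in from the stack's params at deploy time. As a rough
illustration only — Ambari's own InlineTemplate behaves similarly but is
not identical, and the values below are made up:

    from jinja2 import Template

    content = "PID_FILE={{webhcat_pid_file}}\nexport HADOOP_HOME={{hadoop_home}}\n"
    rendered = Template(content).render(
        webhcat_pid_file='/var/run/webhcat/webhcat.pid',  # assumed value
        hadoop_home='/usr/lib/hadoop',                    # assumed value
    )
    print(rendered)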

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/webhcat-site.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/webhcat-site.xml
new file mode 100644
index 0000000..335048e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/webhcat-site.xml
@@ -0,0 +1,135 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- 
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<!-- The default settings for Templeton. -->
+<!-- Edit templeton-site.xml to change settings for your local -->
+<!-- install. -->
+
+<configuration supports_final="true">
+
+  <property>
+    <name>templeton.port</name>
+    <value>50111</value>
+    <description>The HTTP port for the main server.</description>
+  </property>
+
+  <property>
+    <name>templeton.hadoop.conf.dir</name>
+    <value>/etc/hadoop/conf</value>
+    <description>The path to the Hadoop configuration.</description>
+  </property>
+
+  <property>
+    <name>templeton.jar</name>
+    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
+    <description>The path to the Templeton jar file.</description>
+  </property>
+
+  <property>
+    <name>templeton.libjars</name>
+    <value>/usr/lib/zookeeper/zookeeper.jar</value>
+    <description>Jars to add to the classpath.</description>
+  </property>
+
+  <property>
+    <name>templeton.hadoop</name>
+    <value>/usr/bin/hadoop</value>
+    <description>The path to the Hadoop executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.pig.archive</name>
+    <value>hdfs:///apps/webhcat/pig.tar.gz</value>
+    <description>The path to the Pig archive.</description>
+  </property>
+
+  <property>
+    <name>templeton.pig.path</name>
+    <value>pig.tar.gz/pig/bin/pig</value>
+    <description>The path to the Pig executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.hcat</name>
+    <value>/usr/bin/hcat</value>
+    <description>The path to the hcatalog executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.hive.archive</name>
+    <value>hdfs:///apps/webhcat/hive.tar.gz</value>
+    <description>The path to the Hive archive.</description>
+  </property>
+
+  <property>
+    <name>templeton.hive.home</name>
+    <value>hive.tar.gz/hive</value>
+    <description>The path to the Hive home within the tar. Has no effect if templeton.hive.archive is not set.</description>
+  </property>
+
+  <property>
+    <name>templeton.hcat.home</name>
+    <value>hive.tar.gz/hive/hcatalog</value>
+    <description>The path to the HCat home within the tar. Has no effect if templeton.hive.archive is not set.</description>
+  </property>
+
+  <property>
+    <name>templeton.hive.path</name>
+    <value>hive.tar.gz/hive/bin/hive</value>
+    <description>The path to the Hive executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.hive.properties</name>
+    <value>hive.metastore.local=false, hive.metastore.uris=thrift://localhost:9933, hive.metastore.sasl.enabled=false</value>
+    <description>Properties to set when running hive.</description>
+  </property>
+
+
+  <property>
+    <name>templeton.zookeeper.hosts</name>
+    <value>localhost:2181</value>
+    <description>ZooKeeper servers, as comma-separated host:port pairs.</description>
+  </property>
+
+  <property>
+    <name>templeton.storage.class</name>
+    <value>org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage</value>
+    <description>The class to use as storage</description>
+  </property>
+
+  <property>
+    <name>templeton.override.enabled</name>
+    <value>false</value>
+    <description>Enable the override path in templeton.override.jars</description>
+  </property>
+
+  <property>
+    <name>templeton.streaming.jar</name>
+    <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
+    <description>The hdfs path to the Hadoop streaming jar file.</description>
+  </property> 
+
+  <property>
+    <name>templeton.exec.timeout</name>
+    <value>60000</value>
+    <description>Timeout for the Templeton API, in milliseconds.</description>
+  </property>
+
+</configuration>
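
templeton.port above exposes the WebHCat REST API on 50111. A quick way to
confirm the server is up is the standard /templeton/v1/status endpoint; the
host below is an assumption:

    import requests

    resp = requests.get('http://localhost:50111/templeton/v1/status',
                        timeout=10)
    resp.raise_for_status()
    print(resp.json())  # typically {"status": "ok", "version": "v1"}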

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/etc/hive-schema-0.12.0.mysql.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/etc/hive-schema-0.12.0.mysql.sql b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/etc/hive-schema-0.12.0.mysql.sql
new file mode 100644
index 0000000..b0415b1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/etc/hive-schema-0.12.0.mysql.sql
@@ -0,0 +1,777 @@
+-- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
+--
+-- Host: localhost    Database: test
+-- ------------------------------------------------------
+-- Server version	5.5.25
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+
+--
+-- Table structure for table `BUCKETING_COLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `BUCKETING_COLS_N49` (`SD_ID`),
+  CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `CDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `CDS` (
+  `CD_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `COLUMNS_V2`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
+  `CD_ID` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE_NAME` varchar(4000) DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
+  KEY `COLUMNS_V2_N49` (`CD_ID`),
+  CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DATABASE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
+  KEY `DATABASE_PARAMS_N49` (`DB_ID`),
+  CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DBS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DBS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`),
+  UNIQUE KEY `UNIQUE_DATABASE` (`NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DB_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
+  `DB_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_GRANT_ID`),
+  UNIQUE KEY `DBPRIVILEGEINDEX` (`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `DB_PRIVS_N49` (`DB_ID`),
+  CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `GLOBAL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
+  `USER_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`USER_GRANT_ID`),
+  UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `IDXS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `IDXS` (
+  `INDEX_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DEFERRED_REBUILD` bit(1) NOT NULL,
+  `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INDEX_TBL_ID` bigint(20) DEFAULT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `ORIG_TBL_ID` bigint(20) DEFAULT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`INDEX_ID`),
+  UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`),
+  KEY `IDXS_N51` (`SD_ID`),
+  KEY `IDXS_N50` (`INDEX_TBL_ID`),
+  KEY `IDXS_N49` (`ORIG_TBL_ID`),
+  CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+  CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `INDEX_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` (
+  `INDEX_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`),
+  KEY `INDEX_PARAMS_N49` (`INDEX_ID`),
+  CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `NUCLEUS_TABLES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` (
+  `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`CLASS_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITIONS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITIONS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`PART_ID`),
+  UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
+  KEY `PARTITIONS_N49` (`TBL_ID`),
+  KEY `PARTITIONS_N50` (`SD_ID`),
+  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`),
+  CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `PARTITIONS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_EVENTS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` (
+  `PART_NAME_ID` bigint(20) NOT NULL,
+  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `EVENT_TIME` bigint(20) NOT NULL,
+  `EVENT_TYPE` int(11) NOT NULL,
+  `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_NAME_ID`),
+  KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEYS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`TBL_ID`,`PKEY_NAME`),
+  KEY `PARTITION_KEYS_N49` (`TBL_ID`),
+  CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEY_VALS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`PART_ID`,`INTEGER_IDX`),
+  KEY `PARTITION_KEY_VALS_N49` (`PART_ID`),
+  CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_ID`,`PARAM_KEY`),
+  KEY `PARTITION_PARAMS_N49` (`PART_ID`),
+  CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` (
+  `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_ID` bigint(20) DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_COLUMN_GRANT_ID`),
+  KEY `PART_COL_PRIVS_N49` (`PART_ID`),
+  KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
+  `PART_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_ID` bigint(20) DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_GRANT_ID`),
+  KEY `PARTPRIVILEGEINDEX` (`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `PART_PRIVS_N49` (`PART_ID`),
+  CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLES` (
+  `ROLE_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`ROLE_ID`),
+  UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLE_MAP`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLE_MAP` (
+  `ROLE_GRANT_ID` bigint(20) NOT NULL,
+  `ADD_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ROLE_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`ROLE_GRANT_ID`),
+  UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `ROLE_MAP_N49` (`ROLE_ID`),
+  CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SDS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `CD_ID` bigint(20) DEFAULT NULL,
+  `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `IS_COMPRESSED` bit(1) NOT NULL,
+  `IS_STOREDASSUBDIRECTORIES` bit(1) NOT NULL,
+  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `NUM_BUCKETS` int(11) NOT NULL,
+  `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SERDE_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`),
+  KEY `SDS_N49` (`SERDE_ID`),
+  KEY `SDS_N50` (`CD_ID`),
+  CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
+  CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SD_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SD_PARAMS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`,`PARAM_KEY`),
+  KEY `SD_PARAMS_N49` (`SD_ID`),
+  CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SEQUENCE_TABLE`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` (
+  `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `NEXT_VAL` bigint(20) NOT NULL,
+  PRIMARY KEY (`SEQUENCE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDES` (
+  `SERDE_ID` bigint(20) NOT NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` (
+  `SERDE_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`),
+  KEY `SERDE_PARAMS_N49` (`SERDE_ID`),
+  CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_NAMES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` (
+  `SD_ID` bigint(20) NOT NULL,
+  `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `SKEWED_COL_NAMES_N49` (`SD_ID`),
+  CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_VALUE_LOC_MAP`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` (
+  `SD_ID` bigint(20) NOT NULL,
+  `STRING_LIST_ID_KID` bigint(20) NOT NULL,
+  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`),
+  KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`),
+  KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`),
+  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` (
+  `STRING_LIST_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST_VALUES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` (
+  `STRING_LIST_ID` bigint(20) NOT NULL,
+  `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`),
+  KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_VALUES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` (
+  `SD_ID_OID` bigint(20) NOT NULL,
+  `STRING_LIST_ID_EID` bigint(20) NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`),
+  KEY `SKEWED_VALUES_N50` (`SD_ID_OID`),
+  KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`),
+  CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SORT_COLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SORT_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ORDER` int(11) NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `SORT_COLS_N49` (`SD_ID`),
+  CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TABLE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`TBL_ID`,`PARAM_KEY`),
+  KEY `TABLE_PARAMS_N49` (`TBL_ID`),
+  CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBLS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `RETENTION` int(11) NOT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `VIEW_EXPANDED_TEXT` mediumtext,
+  `VIEW_ORIGINAL_TEXT` mediumtext,
+  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_ID`),
+  UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
+  KEY `TBLS_N50` (`SD_ID`),
+  KEY `TBLS_N49` (`DB_ID`),
+  KEY `TBLS_N51` (`LINK_TARGET_ID`),
+  CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`),
+  CONSTRAINT `TBLS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
+  `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_COLUMN_GRANT_ID`),
+  KEY `TABLECOLUMNPRIVILEGEINDEX` (`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `TBL_COL_PRIVS_N49` (`TBL_ID`),
+  CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_PRIVS` (
+  `TBL_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_GRANT_ID`),
+  KEY `TBL_PRIVS_N49` (`TBL_ID`),
+  KEY `TABLEPRIVILEGEINDEX` (`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TAB_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TBL_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table `PART_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PART_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table `TYPES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPES` (
+  `TYPES_ID` bigint(20) NOT NULL,
+  `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`TYPES_ID`),
+  UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TYPE_FIELDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` (
+  `TYPE_NAME` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`),
+  KEY `TYPE_FIELDS_N49` (`TYPE_NAME`),
+  CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
+CREATE TABLE IF NOT EXISTS `MASTER_KEYS` 
+(
+    `KEY_ID` INTEGER NOT NULL AUTO_INCREMENT,
+    `MASTER_KEY` VARCHAR(767) BINARY NULL,
+    PRIMARY KEY (`KEY_ID`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
+-- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS`
+(
+    `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL,
+    `TOKEN` VARCHAR(767) BINARY NULL,
+    PRIMARY KEY (`TOKEN_IDENT`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for VERSION
+--
+CREATE TABLE IF NOT EXISTS `VERSION` (
+  `VER_ID` BIGINT NOT NULL,
+  `SCHEMA_VERSION` VARCHAR(127) NOT NULL,
+  `VERSION_COMMENT` VARCHAR(255),
+  PRIMARY KEY (`VER_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.12.0', 'Hive release version 0.12.0');
+
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2012-08-23  0:56:31
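
The dump ends by seeding the VERSION table with schema version 0.12.0, which
is what Hive's schematool later reads to validate the metastore. A minimal
sketch of that check, assuming mysql-connector-python and the connection
values from hive-site.xml above (the password is made up):

    import mysql.connector

    conn = mysql.connector.connect(host='localhost', user='hive',
                                   password='hive', database='hive')
    cursor = conn.cursor()
    cursor.execute('SELECT SCHEMA_VERSION, VERSION_COMMENT FROM VERSION')
    print(cursor.fetchone())  # ('0.12.0', 'Hive release version 0.12.0')
    cursor.close()
    conn.close()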


[04/37] ambari git commit: AMBARI-8695: Common Services: Refactor HDP-2.0.6 HDFS, ZOOKEEPER services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer-err.log
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer-err.log b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer-err.log
deleted file mode 100644
index d7c6704..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer-err.log
+++ /dev/null
@@ -1,1032 +0,0 @@
-14/07/28 17:01:48 INFO balancer.Balancer: Using a threshold of 5.0
-14/07/28 17:01:48 INFO balancer.Balancer: namenodes = [hdfs://evhubudsd1aae.budapest.epam.com:8020]
-14/07/28 17:01:48 INFO balancer.Balancer: p         = Balancer.Parameters[BalancingPolicy.Node, threshold=5.0]
-14/07/28 17:01:49 INFO balancer.Balancer: Block token params received from NN: keyUpdateInterval=600 min(s), tokenLifetime=600 min(s)
-14/07/28 17:01:49 INFO block.BlockTokenSecretManager: Setting block keys
-14/07/28 17:01:49 INFO balancer.Balancer: Balancer will update its block keys every 150 minute(s)
-14/07/28 17:01:49 INFO block.BlockTokenSecretManager: Setting block keys
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:01:49 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.887235026238486]]
-14/07/28 17:01:49 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.178140109955496]]
-14/07/28 17:01:49 INFO balancer.Balancer: Need to move 5.74 GB to make the cluster balanced.
-14/07/28 17:01:49 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:01:49 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:01:57 INFO balancer.Balancer: Moving block 1073950748 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:01:58 INFO balancer.Balancer: Moving block 1073939272 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:02:06 INFO balancer.Balancer: Moving block 1073863504 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:02:13 INFO balancer.Balancer: Moving block 1073863516 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:02:31 INFO balancer.Balancer: Moving block 1073743089 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:03:00 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.803451571241915]]
-14/07/28 17:03:00 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.262867215362437]]
-14/07/28 17:03:00 INFO balancer.Balancer: Need to move 5.58 GB to make the cluster balanced.
-14/07/28 17:03:00 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:03:00 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073937443 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073926003 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073916372 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073926002 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:03:00 INFO balancer.Balancer: Moving block 1073920016 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:03:05 INFO balancer.Balancer: Moving block 1073937461 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:03:11 INFO balancer.Balancer: Moving block 1073743437 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:03:20 INFO balancer.Balancer: Moving block 1073743443 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:03:31 INFO balancer.Balancer: Moving block 1073743449 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:03:34 INFO balancer.Balancer: Moving block 1073743440 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:04:07 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:04:07 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.70875539052811]]
-14/07/28 17:04:07 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.35756339607624]]
-14/07/28 17:04:07 INFO balancer.Balancer: Need to move 5.40 GB to make the cluster balanced.
-14/07/28 17:04:07 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:04:07 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:04:07 INFO balancer.Balancer: Moving block 1073743776 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:04:08 INFO balancer.Balancer: Moving block 1073915941 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:04:08 INFO balancer.Balancer: Moving block 1073930160 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:04:08 INFO balancer.Balancer: Moving block 1073930161 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:04:08 INFO balancer.Balancer: Moving block 1073908316 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:04:09 INFO balancer.Balancer: Moving block 1073930163 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:04:51 INFO balancer.Balancer: Moving block 1073947549 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:05:04 INFO balancer.Balancer: Moving block 1073863141 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:05:06 INFO balancer.Balancer: Moving block 1073863139 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:05:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:05:14 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.53815392807349]]
-14/07/28 17:05:14 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.528164858530864]]
-14/07/28 17:05:14 INFO balancer.Balancer: Need to move 5.06 GB to make the cluster balanced.
-14/07/28 17:05:14 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:05:14 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073945158 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073918874 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073918873 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073945162 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073918867 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073945160 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073914540 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073918868 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:05:14 INFO balancer.Balancer: Moving block 1073931861 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:05:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:05:50 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.538117645568114]]
-14/07/28 17:05:50 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.52820114103624]]
-14/07/28 17:05:50 INFO balancer.Balancer: Need to move 5.06 GB to make the cluster balanced.
-14/07/28 17:05:50 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:05:50 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073916888 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073925481 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073920767 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073908143 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073911961 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:05:50 INFO balancer.Balancer: Moving block 1073929306 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:06:09 INFO balancer.Balancer: Moving block 1073863170 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:06:33 INFO balancer.Balancer: Moving block 1073929250 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:06:35 INFO balancer.Balancer: Moving block 1073863186 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:06:56 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:06:56 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.407811418798076]]
-14/07/28 17:06:56 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.658507367806276]]
-14/07/28 17:06:56 INFO balancer.Balancer: Need to move 4.81 GB to make the cluster balanced.
-14/07/28 17:06:56 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:06:56 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:06:56 INFO balancer.Balancer: Moving block 1073919724 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:06:56 INFO balancer.Balancer: Moving block 1073915864 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073910902 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073949844 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073926217 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073919721 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073926320 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073946575 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:06:57 INFO balancer.Balancer: Moving block 1073949843 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:07:33 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:07:33 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.4068167244793]]
-14/07/28 17:07:33 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.659502062125057]]
-14/07/28 17:07:33 INFO balancer.Balancer: Need to move 4.80 GB to make the cluster balanced.
-14/07/28 17:07:33 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:07:33 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:07:33 INFO balancer.Balancer: Moving block 1073948620 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:07:33 INFO balancer.Balancer: Moving block 1073917051 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:07:34 INFO balancer.Balancer: Moving block 1073924651 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:07:40 INFO balancer.Balancer: Moving block 1073742834 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:08:55 INFO balancer.Balancer: Moving block 1073894040 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:08:56 INFO balancer.Balancer: Moving block 1073932476 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:08:59 INFO balancer.Balancer: Moving block 1073742598 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:09:00 INFO balancer.Balancer: Moving block 1073893997 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:09:11 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:09:11 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.144332676814294]]
-14/07/28 17:09:11 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.92198610979006]]
-14/07/28 17:09:11 INFO balancer.Balancer: Need to move 4.29 GB to make the cluster balanced.
-14/07/28 17:09:11 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:09:11 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073920127 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073743556 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073743557 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073929950 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073942945 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073920115 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073743559 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073947343 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:11 INFO balancer.Balancer: Moving block 1073920075 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:09:47 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:09:47 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=34.14396676101451]]
-14/07/28 17:09:47 INFO balancer.Balancer: 1 underutilized: [BalancerDatanode[10.253.130.5:50010, utilization=21.92215625345692]]
-14/07/28 17:09:47 INFO balancer.Balancer: Need to move 4.29 GB to make the cluster balanced.
-14/07/28 17:09:47 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:09:47 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:09:47 INFO balancer.Balancer: Moving block 1073951772 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:47 INFO balancer.Balancer: Moving block 1073951752 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.129.225:50010 is succeeded.
-14/07/28 17:09:47 INFO balancer.Balancer: Moving block 1073951754 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:09:47 INFO balancer.Balancer: Moving block 1073951766 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:52 INFO balancer.Balancer: Moving block 1073951747 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:09:56 INFO balancer.Balancer: Moving block 1073951765 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:10:53 INFO balancer.Balancer: Moving block 1073951746 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:10:54 INFO balancer.Balancer: Moving block 1073951745 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:10:54 INFO balancer.Balancer: Moving block 1073951744 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:11:24 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:11:24 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.9413931647133]]
-14/07/28 17:11:24 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:11:24 INFO balancer.Balancer: Need to move 3.89 GB to make the cluster balanced.
-14/07/28 17:11:24 INFO balancer.Balancer: Decided to move 5.84 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
-14/07/28 17:11:24 INFO balancer.Balancer: Decided to move 2.64 GB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
-14/07/28 17:11:24 INFO balancer.Balancer: Decided to move 1.31 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
-14/07/28 17:11:24 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073940539 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073940537 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073927798 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073935420 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073927775 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073923954 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073918163 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073949253 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.3:50010 is succeeded.
-14/07/28 17:11:24 INFO balancer.Balancer: Moving block 1073931581 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:11:25 INFO balancer.Balancer: Moving block 1073923922 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.3:50010 is succeeded.
-14/07/28 17:11:25 INFO balancer.Balancer: Moving block 1073931532 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:11:25 INFO balancer.Balancer: Moving block 1073949248 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073923928 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073927787 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.129.225:50010 is succeeded.
-14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073949252 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.129.225:50010 is succeeded.
-14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073906578 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:11:29 INFO balancer.Balancer: Moving block 1073914353 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.6:50010 is succeeded.
-14/07/28 17:11:30 INFO balancer.Balancer: Moving block 1073931557 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:11:30 INFO balancer.Balancer: Moving block 1073910459 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:12:00 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:12:00 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.923538618186065]]
-14/07/28 17:12:00 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:12:00 INFO balancer.Balancer: Need to move 3.86 GB to make the cluster balanced.
-14/07/28 17:12:00 INFO balancer.Balancer: Decided to move 2.61 GB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
-14/07/28 17:12:00 INFO balancer.Balancer: Decided to move 7.18 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:12:00 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073949133 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.7:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073945194 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073927453 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073923118 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073905689 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073914494 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073905688 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073923119 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073914488 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073905681 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073905677 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073927648 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073945235 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073945226 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073910053 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:12:01 INFO balancer.Balancer: Moving block 1073927664 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.6:50010 is succeeded.
-14/07/28 17:12:29 INFO balancer.Balancer: Moving block 1073905173 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:13:19 INFO balancer.Balancer: Moving block 1073905177 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:19 INFO balancer.Balancer: Moving block 1073905171 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:21 INFO balancer.Balancer: Moving block 1073905175 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:27 INFO balancer.Balancer: Moving block 1073905172 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:13:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:13:37 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.60177342833359]]
-14/07/28 17:13:37 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:13:37 INFO balancer.Balancer: Need to move 3.23 GB to make the cluster balanced.
-14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 1.73 GB bytes from 10.253.130.9:50010 to 10.253.129.223:50010
-14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 375.17 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
-14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 1.00 GB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
-14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 3.66 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
-14/07/28 17:13:37 INFO balancer.Balancer: Decided to move 3.03 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
-14/07/28 17:13:37 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073914692 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073927391 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073927383 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073923582 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073905952 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073914693 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073923467 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073918495 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.10:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073923466 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.225:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073948829 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073945548 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073948902 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.10:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073945546 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073905987 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073945549 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.129.225:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073918570 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.129.225:50010 is succeeded.
-14/07/28 17:13:37 INFO balancer.Balancer: Moving block 1073945542 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073927370 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.3:50010 is succeeded.
-14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073914708 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.8:50010 is succeeded.
-14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073948908 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.1:50010 is succeeded.
-14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073918565 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:13:38 INFO balancer.Balancer: Moving block 1073923572 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:13:46 INFO balancer.Balancer: Moving block 1073936056 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:49 INFO balancer.Balancer: Moving block 1073936057 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:13:52 INFO balancer.Balancer: Moving block 1073936063 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:14:09 INFO balancer.Balancer: Moving block 1073936045 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:14:09 INFO balancer.Balancer: Moving block 1073936034 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:14:40 INFO balancer.Balancer: Moving block 1073936032 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.10:50010 is succeeded.
-14/07/28 17:14:40 INFO balancer.Balancer: Moving block 1073936033 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:14:41 INFO balancer.Balancer: Moving block 1073936036 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.6:50010 is succeeded.
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:15:13 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:15:13 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.2458785989085]]
-14/07/28 17:15:13 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:15:13 INFO balancer.Balancer: Need to move 2.53 GB to make the cluster balanced.
-14/07/28 17:15:13 INFO balancer.Balancer: Decided to move 5.46 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
-14/07/28 17:15:13 INFO balancer.Balancer: Decided to move 3.66 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
-14/07/28 17:15:13 INFO balancer.Balancer: Decided to move 683.02 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
-14/07/28 17:15:13 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073934407 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073926699 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073907624 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073930612 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.6:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073950332 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073934387 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073930508 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073934414 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073945924 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:13 INFO balancer.Balancer: Moving block 1073922816 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:15:14 INFO balancer.Balancer: Moving block 1073934411 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:14 INFO balancer.Balancer: Moving block 1073926698 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:15:14 INFO balancer.Balancer: Moving block 1073922838 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:15:15 INFO balancer.Balancer: Moving block 1073919113 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:15 INFO balancer.Balancer: Moving block 1073922843 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:15:15 INFO balancer.Balancer: Moving block 1073907649 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:15 INFO balancer.Balancer: Moving block 1073950223 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:15:49 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:15:49 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.23893576243181]]
-14/07/28 17:15:49 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:15:49 INFO balancer.Balancer: Need to move 2.52 GB to make the cluster balanced.
-14/07/28 17:15:49 INFO balancer.Balancer: Decided to move 375.06 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
-14/07/28 17:15:49 INFO balancer.Balancer: Decided to move 3.66 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
-14/07/28 17:15:49 INFO balancer.Balancer: Decided to move 4.44 GB bytes from 10.253.130.9:50010 to 10.253.130.10:50010
-14/07/28 17:15:49 INFO balancer.Balancer: Decided to move 1.33 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
-14/07/28 17:15:49 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073931740 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073927810 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073923141 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073910191 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073905793 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.3:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073940704 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073949348 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936134 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073914594 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073949356 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.8:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936148 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936164 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936158 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073949359 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073918912 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073914616 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073936151 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:15:49 INFO balancer.Balancer: Moving block 1073923999 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:15:50 INFO balancer.Balancer: Moving block 1073940722 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:15:51 INFO balancer.Balancer: Moving block 1073927855 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:51 INFO balancer.Balancer: Moving block 1073906497 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:15:51 INFO balancer.Balancer: Moving block 1073949350 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.129.224:50010 is succeeded.
-14/07/28 17:15:51 INFO balancer.Balancer: Moving block 1073945051 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:16:25 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:16:25 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.236639727566796]]
-14/07/28 17:16:25 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:16:25 INFO balancer.Balancer: Need to move 2.51 GB to make the cluster balanced.
-14/07/28 17:16:25 INFO balancer.Balancer: Decided to move 2.36 GB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
-14/07/28 17:16:25 INFO balancer.Balancer: Decided to move 1.53 GB bytes from 10.253.130.9:50010 to 10.253.129.223:50010
-14/07/28 17:16:25 INFO balancer.Balancer: Decided to move 5.45 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
-14/07/28 17:16:25 INFO balancer.Balancer: Decided to move 463.99 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
-14/07/28 17:16:25 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073942946 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073947339 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073912361 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073926131 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073947341 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073929961 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743570 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073916254 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743604 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743581 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073926130 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073920078 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073916287 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073933727 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073908503 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743586 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743580 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073937539 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073908497 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073942916 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743590 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073947329 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743599 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.6:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073743600 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.6:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073895265 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073937542 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073916258 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.225:50010 is succeeded.
-14/07/28 17:16:25 INFO balancer.Balancer: Moving block 1073916286 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.1:50010 is succeeded.
-14/07/28 17:16:47 INFO balancer.Balancer: Moving block 1073862841 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:17:01 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:17:01 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.1720712908457]]
-14/07/28 17:17:01 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:17:01 INFO balancer.Balancer: Need to move 2.39 GB to make the cluster balanced.
-14/07/28 17:17:01 INFO balancer.Balancer: Decided to move 3.66 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
-14/07/28 17:17:01 INFO balancer.Balancer: Decided to move 5.45 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
-14/07/28 17:17:01 INFO balancer.Balancer: Decided to move 698.32 MB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
-14/07/28 17:17:01 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915689 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073946573 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915690 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915841 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073919491 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915694 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.10:50010 is succeeded.
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073915842 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073949829 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.3:50010 is succeeded.
-14/07/28 17:17:01 INFO balancer.Balancer: Moving block 1073895888 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.129.223:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073949830 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073922418 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073931011 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073949848 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073904475 from 10.253.130.9:50010 to 10.253.129.224:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073946583 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073904561 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073949813 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073915703 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:17:02 INFO balancer.Balancer: Moving block 1073926226 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:17:37 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:17:37 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.17123487505752]]
-14/07/28 17:17:37 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:17:37 INFO balancer.Balancer: Need to move 2.38 GB to make the cluster balanced.
-14/07/28 17:17:37 INFO balancer.Balancer: Decided to move 2.23 GB bytes from 10.253.130.9:50010 to 10.253.130.8:50010
-14/07/28 17:17:37 INFO balancer.Balancer: Decided to move 373.37 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
-14/07/28 17:17:37 INFO balancer.Balancer: Decided to move 4.43 GB bytes from 10.253.130.9:50010 to 10.253.130.10:50010
-14/07/28 17:17:37 INFO balancer.Balancer: Decided to move 2.76 GB bytes from 10.253.130.9:50010 to 10.253.130.1:50010
-14/07/28 17:17:37 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951505 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951406 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951465 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951428 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951479 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951294 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951363 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951445 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951368 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951466 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951325 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.129.224:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951296 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951333 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951315 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951502 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951383 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.4:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951489 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951504 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.3:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951313 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951326 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:38 INFO balancer.Balancer: Moving block 1073951310 from 10.253.130.9:50010 to 10.253.130.1:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:17:44 INFO balancer.Balancer: Moving block 1073951520 from 10.253.130.9:50010 to 10.253.130.8:50010 through 10.253.130.1:50010 is succeeded.
-14/07/28 17:17:44 INFO balancer.Balancer: Moving block 1073864141 from 10.253.130.9:50010 to 10.253.130.7:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:18:14 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:18:14 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.13074467796647]]
-14/07/28 17:18:14 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:18:14 INFO balancer.Balancer: Need to move 2.31 GB to make the cluster balanced.
-14/07/28 17:18:14 INFO balancer.Balancer: Decided to move 9.08 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:18:14 INFO balancer.Balancer: Decided to move 729.65 MB bytes from 10.253.130.9:50010 to 10.253.129.223:50010
-14/07/28 17:18:14 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935830 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073931492 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073931497 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073913899 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073910416 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073928121 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073931496 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073927763 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935825 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935414 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073928117 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073928114 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935419 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073927766 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073935418 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073910423 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is succeeded.
-14/07/28 17:18:14 INFO balancer.Balancer: Moving block 1073949598 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:18:50 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:18:50 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.1305062958578]]
-14/07/28 17:18:50 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:18:50 INFO balancer.Balancer: Need to move 2.30 GB to make the cluster balanced.
-14/07/28 17:18:50 INFO balancer.Balancer: Decided to move 895.07 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
-14/07/28 17:18:50 INFO balancer.Balancer: Decided to move 1.53 GB bytes from 10.253.130.9:50010 to 10.253.129.223:50010
-14/07/28 17:18:50 INFO balancer.Balancer: Decided to move 7.38 GB bytes from 10.253.130.9:50010 to 10.253.130.5:50010
-14/07/28 17:18:50 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073930642 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073950456 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.10:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934505 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073950457 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.8:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934524 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073930646 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073915219 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934502 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073930640 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.5:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073926854 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934510 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.129.225:50010 is succeeded.
-14/07/28 17:18:50 INFO balancer.Balancer: Moving block 1073934503 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:18:51 INFO balancer.Balancer: Moving block 1073926851 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.11:50010 is succeeded.
-14/07/28 17:18:51 INFO balancer.Balancer: Moving block 1073926857 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.6:50010 is succeeded.
-14/07/28 17:18:51 INFO balancer.Balancer: Moving block 1073930652 from 10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.3:50010 is succeeded.
-14/07/28 17:18:52 INFO balancer.Balancer: Moving block 1073930651 from 10.253.130.9:50010 to 10.253.130.2:50010 through 10.253.130.6:50010 is succeeded.
-14/07/28 17:19:02 INFO balancer.Balancer: Moving block 1073934496 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:19:03 INFO balancer.Balancer: Moving block 1073934497 from 10.253.130.9:50010 to 10.253.129.223:50010 through 10.253.130.0:50010 is succeeded.
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.3:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.7:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.10:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.11:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.2:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.6:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.224:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.1:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.223:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.9:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.129.225:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.5:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.8:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.4:50010
-14/07/28 17:19:26 INFO net.NetworkTopology: Adding a new node: /default-rack/10.253.130.0:50010
-14/07/28 17:19:26 INFO balancer.Balancer: 1 over-utilized: [Source[10.253.130.9:50010, utilization=33.07965400229293]]
-14/07/28 17:19:26 INFO balancer.Balancer: 0 underutilized: []
-14/07/28 17:19:26 INFO balancer.Balancer: Need to move 2.21 GB to make the cluster balanced.
-14/07/28 17:19:26 INFO balancer.Balancer: Decided to move 333.25 MB bytes from 10.253.130.9:50010 to 10.253.130.7:50010
-14/07/28 17:19:26 INFO balancer.Balancer: Decided to move 4.43 GB bytes from 10.253.130.9:50010 to 10.253.130.10:50010
-14/07/28 17:19:26 INFO balancer.Balancer: Decided to move 881.78 MB bytes from 10.253.130.9:50010 to 10.253.130.2:50010
-14/07/28 17:19:26 INFO balancer.Balancer: Decided to move 4.17 GB bytes from 10.253.130.9:50010 to 10.253.129.224:50010
-14/07/28 17:19:26 INFO balancer.Balancer: Will move 9.79 GB in this iteration
-14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073931910 from 10.253.130.9:50010 to 10.253.130.10:50010 through 10.253.130.9:50010 is succeeded.
-14/07/28 17:19:26 INFO balancer.Balancer: Moving block 1073905704 from 10.253.130.9:50010 to 10.253.130.7:50010 thr

<TRUNCATED>
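The truncated output above shows the HDFS balancer's standard loop: each iteration re-reads the cluster topology, flags datanodes whose utilization deviates from the cluster mean (here a single over-utilized node, 10.253.130.9), budgets how many bytes to shift to which targets, and then moves blocks one at a time until the iteration quota is met. As an illustration only — the threshold value below is an example, not something this commit sets — a run like the one logged above is typically started with:

    # Run the HDFS balancer as the hdfs user. -threshold is the allowed
    # per-datanode deviation (in percent) from the mean DFS utilization;
    # the balancer iterates, as logged above, until the cluster is within it.
    su - hdfs -c "hdfs balancer -threshold 10"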

[18/37] ambari git commit: AMBARI-8745: Common Services: Refactor HDP2.0.6 FLUME, GANGLIA, HBASE services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmond.init
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmond.init b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmond.init
deleted file mode 100644
index afb7026..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmond.init
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/sh
-# chkconfig: 2345 70 40
-# description: hdp-gmond startup script
-# processname: hdp-gmond
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Remember to keep this in sync with the definition of
-# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
-HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
-HDP_GANLIA_GMOND_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmond.sh
-HDP_GANLIA_GMOND_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmond.sh
-HDP_GANLIA_GMOND_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmond.sh
-
-RETVAL=0
-
-case "$1" in
-   start)
-      echo "============================="
-      echo "Starting hdp-gmond..."
-      echo "============================="
-      [ -f ${HDP_GANLIA_GMOND_STARTER} ] || exit 1
-      eval "${HDP_GANLIA_GMOND_STARTER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmond
-      ;;
-
-  stop)
-      echo "=================================="
-      echo "Shutting down hdp-gmond..."
-      echo "=================================="
-      [ -f ${HDP_GANLIA_GMOND_STOPPER} ] || exit 1
-      eval "${HDP_GANLIA_GMOND_STOPPER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmond
-      ;;
-
-  restart|reload)
-   	$0 stop
-   	$0 start
-   	RETVAL=$?
-	;;
-  status)
-      echo "======================================="
-      echo "Checking status of hdp-gmond..."
-      echo "======================================="
-      [ -f ${HDP_GANLIA_GMOND_CHECKER} ] || exit 1
-      eval "${HDP_GANLIA_GMOND_CHECKER}"
-      RETVAL=$?
-      ;;
-  *)
-	echo "Usage: $0 {start|stop|restart|status}"
-	exit 1
-esac
-
-exit $RETVAL
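
The deleted init script above is a standard SysV service wrapper: the "# chkconfig: 2345 70 40" header declares the runlevels and start/stop priorities, and the /var/lock/subsys/hdp-gmond lock file is how the rc system tracks that the service is up. Purely as an illustration of how such a script is driven on a RHEL/CentOS-era host — these commands are not part of this diff:

    # Register and drive the SysV service:
    chkconfig --add hdp-gmond    # reads the chkconfig header above
    chkconfig hdp-gmond on       # enable at runlevels 2,3,4,5
    service hdp-gmond start      # dispatches to the start) branch
    service hdp-gmond status     # runs checkGmond.sh via the status) branch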

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmondLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmondLib.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmondLib.sh
deleted file mode 100644
index d06afd8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmondLib.sh
+++ /dev/null
@@ -1,538 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-GMOND_BIN=/usr/sbin/gmond;
-GMOND_CORE_CONF_FILE=gmond.core.conf;
-GMOND_MASTER_CONF_FILE=gmond.master.conf;
-GMOND_SLAVE_CONF_FILE=gmond.slave.conf;
-GMOND_PID_FILE=gmond.pid;
-
-# Functions.
-function getGmondCoreConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/${GMOND_CORE_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/${GMOND_CORE_CONF_FILE}";
-    fi
-}
-
-function getGmondMasterConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_MASTER_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_MASTER_CONF_FILE}";
-    fi
-}
-
-function getGmondSlaveConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_SLAVE_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_SLAVE_CONF_FILE}";
-    fi
-}
-
-function getGmondPidFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_RUNTIME_DIR}/${clusterName}/${GMOND_PID_FILE}";
-    else
-        echo "${GANGLIA_RUNTIME_DIR}/${GMOND_PID_FILE}";
-    fi
-}
-
-function getGmondLoggedPid()
-{
-    gmondPidFile=`getGmondPidFileName ${1}`;
-
-    if [ -e "${gmondPidFile}" ]
-    then
-        echo `cat ${gmondPidFile}`;
-    fi
-}
-
-function getGmondRunningPid()
-{
-    gmondLoggedPid=`getGmondLoggedPid ${1}`;
-
-    if [ -n "${gmondLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${gmondLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}
-
-function generateGmondCoreConf()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_CORE_CONF
-#################### Generated by ${0} on ${now} ####################
-#
-/* This configuration is as close to 2.5.x default behavior as possible.
-   The values closely match ./gmond/metric.h definitions in 2.5.x */
-globals {
-  daemonize = yes
-  setuid = yes
-  user = ${GMOND_USER}
-  debug_level = 0
-  max_udp_msg_len = 1472
-  mute = no
-  deaf = no 
-  allow_extra_data = yes
-  host_dmax = 0 /*secs */
-  host_tmax = 20 /*secs */
-  cleanup_threshold = 300 /*secs */
-  gexec = no
-  send_metadata_interval = 30 /*secs */
-}
-
-/*
- * The cluster attributes specified will be used as part of the <CLUSTER>
- * tag that will wrap all hosts collected by this instance.
- */
-cluster {
-  name = "${gmondClusterName}"
-  owner = "unspecified"
-  latlong = "unspecified"
-  url = "unspecified"
-}
-
-/* The host section describes attributes of the host, like the location */
-host {
-  location = "unspecified"
-}
-
-/* You can specify as many tcp_accept_channels as you like to share
- * an XML description of the state of the cluster.
- *
- * At the very least, every gmond must expose its XML state to 
- * queriers from localhost.
- */
-tcp_accept_channel {
-  bind = 0.0.0.0
-  port = ${gmondPort}
-}
-
-/* Each metrics module that is referenced by gmond must be specified and
-   loaded. If the module has been statically linked with gmond, it does
-   not require a load path. However all dynamically loadable modules must
-   include a load path. */
-modules {
-  module {
-    name = "core_metrics"
-  }
-  module {
-    name = "cpu_module"
-    path = "${MODULES_DIR}/modcpu.so"
-  }
-  module {
-    name = "disk_module"
-    path = "${MODULES_DIR}/moddisk.so"
-  }
-  module {
-    name = "load_module"
-    path = "${MODULES_DIR}/modload.so"
-  }
-  module {
-    name = "mem_module"
-    path = "${MODULES_DIR}/modmem.so"
-  }
-  module {
-    name = "net_module"
-    path = "${MODULES_DIR}/modnet.so"
-  }
-  module {
-    name = "proc_module"
-    path = "${MODULES_DIR}/modproc.so"
-  }
-  module {
-    name = "sys_module"
-    path = "${MODULES_DIR}/modsys.so"
-  }
-}
-
-/* The old internal 2.5.x metric array has been replaced by the following
-   collection_group directives.  What follows is the default behavior for
-   collecting and sending metrics that is as close to 2.5.x behavior as
-   possible. */
-
-/* This collection group will cause a heartbeat (or beacon) to be sent every
-   20 seconds.  In the heartbeat is the GMOND_STARTED data which expresses
-   the age of the running gmond. */
-collection_group {
-  collect_once = yes
-  time_threshold = 20
-  metric {
-    name = "heartbeat"
-  }
-}
-
-/* This collection group will send general info about this host's total memory
-   every 180 secs.
-   This information doesn't change between reboots and is only collected
-   once. It is needed for the heatmap display. */
- collection_group {
-   collect_once = yes
-   time_threshold = 180
-   metric {
-    name = "mem_total"
-    title = "Memory Total"
-   }
- }
-
-/* This collection group will send general info about this host every
-   1200 secs.
-   This information doesn't change between reboots and is only collected
-   once. */
-collection_group {
-  collect_once = yes
-  time_threshold = 1200
-  metric {
-    name = "cpu_num"
-    title = "CPU Count"
-  }
-  metric {
-    name = "cpu_speed"
-    title = "CPU Speed"
-  }
-  /* Should this be here? Swap can be added/removed between reboots. */
-  metric {
-    name = "swap_total"
-    title = "Swap Space Total"
-  }
-  metric {
-    name = "boottime"
-    title = "Last Boot Time"
-  }
-  metric {
-    name = "machine_type"
-    title = "Machine Type"
-  }
-  metric {
-    name = "os_name"
-    title = "Operating System"
-  }
-  metric {
-    name = "os_release"
-    title = "Operating System Release"
-  }
-  metric {
-    name = "location"
-    title = "Location"
-  }
-}
-
-/* This collection group will send the status of gexecd for this host
-   every 300 secs.*/
-/* Unlike 2.5.x the default behavior is to report gexecd OFF. */
-collection_group {
-  collect_once = yes
-  time_threshold = 300
-  metric {
-    name = "gexec"
-    title = "Gexec Status"
-  }
-}
-
-/* This collection group will collect the CPU status info every 20 secs.
-   The time threshold is set to 90 seconds.  In honesty, this
-   time_threshold could be set significantly higher to reduce
-   unnecessary network chatter. */
-collection_group {
-  collect_every = 20
-  time_threshold = 90
-  /* CPU status */
-  metric {
-    name = "cpu_user"
-    value_threshold = "1.0"
-    title = "CPU User"
-  }
-  metric {
-    name = "cpu_system"
-    value_threshold = "1.0"
-    title = "CPU System"
-  }
-  metric {
-    name = "cpu_idle"
-    value_threshold = "5.0"
-    title = "CPU Idle"
-  }
-  metric {
-    name = "cpu_nice"
-    value_threshold = "1.0"
-    title = "CPU Nice"
-  }
-  metric {
-    name = "cpu_aidle"
-    value_threshold = "5.0"
-    title = "CPU aidle"
-  }
-  metric {
-    name = "cpu_wio"
-    value_threshold = "1.0"
-    title = "CPU wio"
-  }
-  /* The next two metrics are optional if you want more detail...
-     ... since they are accounted for in cpu_system.
-  metric {
-    name = "cpu_intr"
-    value_threshold = "1.0"
-    title = "CPU intr"
-  }
-  metric {
-    name = "cpu_sintr"
-    value_threshold = "1.0"
-    title = "CPU sintr"
-  }
-  */
-}
-
-collection_group {
-  collect_every = 20
-  time_threshold = 90
-  /* Load Averages */
-  metric {
-    name = "load_one"
-    value_threshold = "1.0"
-    title = "One Minute Load Average"
-  }
-  metric {
-    name = "load_five"
-    value_threshold = "1.0"
-    title = "Five Minute Load Average"
-  }
-  metric {
-    name = "load_fifteen"
-    value_threshold = "1.0"
-    title = "Fifteen Minute Load Average"
-  }
-}
-
-/* This group collects the number of running and total processes */
-collection_group {
-  collect_every = 80
-  time_threshold = 950
-  metric {
-    name = "proc_run"
-    value_threshold = "1.0"
-    title = "Total Running Processes"
-  }
-  metric {
-    name = "proc_total"
-    value_threshold = "1.0"
-    title = "Total Processes"
-  }
-}
-
-/* This collection group grabs the volatile memory metrics every 40 secs and
-   sends them at least every 180 secs.  This time_threshold can be increased
-   significantly to reduce unneeded network traffic. */
-collection_group {
-  collect_every = 40
-  time_threshold = 180
-  metric {
-    name = "mem_free"
-    value_threshold = "1024.0"
-    title = "Free Memory"
-  }
-  metric {
-    name = "mem_shared"
-    value_threshold = "1024.0"
-    title = "Shared Memory"
-  }
-  metric {
-    name = "mem_buffers"
-    value_threshold = "1024.0"
-    title = "Memory Buffers"
-  }
-  metric {
-    name = "mem_cached"
-    value_threshold = "1024.0"
-    title = "Cached Memory"
-  }
-  metric {
-    name = "swap_free"
-    value_threshold = "1024.0"
-    title = "Free Swap Space"
-  }
-}
-
-collection_group {
-  collect_every = 40
-  time_threshold = 300
-  metric {
-    name = "bytes_out"
-    value_threshold = 4096
-    title = "Bytes Sent"
-  }
-  metric {
-    name = "bytes_in"
-    value_threshold = 4096
-    title = "Bytes Received"
-  }
-  metric {
-    name = "pkts_in"
-    value_threshold = 256
-    title = "Packets Received"
-  }
-  metric {
-    name = "pkts_out"
-    value_threshold = 256
-    title = "Packets Sent"
-  }
-}
-
-
-collection_group {
-  collect_every = 40
-  time_threshold = 180
-  metric {
-    name = "disk_free"
-    value_threshold = 1.0
-    title = "Disk Space Available"
-  }
-  metric {
-    name = "part_max_used"
-    value_threshold = 1.0
-    title = "Maximum Disk Space Used"
-  }
-  metric {
-    name = "disk_total"
-    value_threshold = 1.0
-    title = "Total Disk Space"
-  }
-}
-
-udp_recv_channel {
-    port = 0
-}
-
-
-include ("${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d/*.conf")
-END_OF_GMOND_CORE_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}
-
-function generateGmondMasterConf
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_MASTER_CONF
-#################### Generated by ${0} on ${now} ####################
-/* Masters only receive; they never send. */
-udp_recv_channel {
-  bind = ${gmondMasterIP}
-  port = ${gmondPort}
-}
-
-END_OF_GMOND_MASTER_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}
-
-function generateGmondSlaveConf
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_SLAVE_CONF
-#################### Generated by ${0} on ${now} ####################
-/* Slaves only send; they never receive. */
-udp_send_channel {
-  #bind_hostname = yes # Highly recommended, soon to be default.
-                       # This option tells gmond to use a source address
-                       # that resolves to the machine's hostname.  Without
-                       # this, the metrics may appear to come from any
-                       # interface and the DNS names associated with
-                       # those IPs will be used to create the RRDs.
-  host = ${gmondMasterIP}
-  port = ${gmondPort}
-  ttl = 1
-}
-END_OF_GMOND_SLAVE_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}
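
Taken together, the helpers above generate a layered configuration per Ganglia cluster: a core gmond config that every node gets, plus a conf.d fragment that makes the node either a master (a udp_recv_channel bound to the master IP — masters only receive) or a slave (a udp_send_channel pointed at that IP — slaves only send). A minimal sketch of how a caller strings these helpers together, assuming an example cluster name of "HDPSlaves":

    # Sketch: emit core plus slave-side config for one cluster.
    source ./gmondLib.sh
    cluster=HDPSlaves
    generateGmondCoreConf  "${cluster}" > "`getGmondCoreConfFileName ${cluster}`"
    generateGmondSlaveConf "${cluster}" > "`getGmondSlaveConfFileName ${cluster}`"

setupGanglia.sh, deleted further below, performs exactly this redirection, after first creating the per-cluster directories and fixing ownership.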

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/rrdcachedLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/rrdcachedLib.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/rrdcachedLib.sh
deleted file mode 100644
index a070fca..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/rrdcachedLib.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-RRDCACHED_BIN=/usr/bin/rrdcached;
-RRDCACHED_PID_FILE=${GANGLIA_RUNTIME_DIR}/rrdcached.pid;
-RRDCACHED_ALL_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.sock;
-RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.limited.sock;
-
-function getRrdcachedLoggedPid()
-{
-    if [ -e "${RRDCACHED_PID_FILE}" ]
-    then
-        echo `cat ${RRDCACHED_PID_FILE}`;
-    fi
-}
-
-function getRrdcachedRunningPid()
-{
-    rrdcachedLoggedPid=`getRrdcachedLoggedPid`;
-
-    if [ -n "${rrdcachedLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${rrdcachedLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}
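
Both PID helpers above share the same liveness idiom: ps prints a sentinel MYPID header column, and grep -v filters it back out, so the function emits output only when the logged PID still belongs to a live process. An equivalent formulation, shown here only for comparison and not part of this commit, is the kill -0 test:

    # kill -0 delivers no signal; it merely tests that the PID exists
    # and is signalable by the current user.
    rrdcachedLoggedPid=`getRrdcachedLoggedPid`
    if [ -n "${rrdcachedLoggedPid}" ] && kill -0 "${rrdcachedLoggedPid}" 2>/dev/null
    then
        echo "${rrdcachedLoggedPid}"
    fi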

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/setupGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/setupGanglia.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/setupGanglia.sh
deleted file mode 100644
index 704766e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/setupGanglia.sh
+++ /dev/null
@@ -1,141 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants, utilities etc.
-source ./gangliaLib.sh
-
-function usage()
-{
-  cat << END_USAGE
-Usage: ${0} [-c <gmondClusterName> [-m]] [-t] [-o <owner>] [-g <group>]
-
-Options:
-  -c <gmondClusterName>   The name of the Ganglia Cluster whose gmond configuration we're here to generate.
-
-  -m                      Whether this gmond (if -t is not specified) is the master for its Ganglia 
-                          Cluster. Without this, we generate slave gmond configuration.
-
-  -t                      Whether this is a call to generate gmetad configuration (as opposed to the
-                          gmond configuration that is generated without this).
-  -o <owner>              Owner
-  -g <group>              Group
-END_USAGE
-}
-
-function instantiateGmetadConf()
-{
-  # gmetad utility library.
-  source ./gmetadLib.sh;
-
-  generateGmetadConf > ${GMETAD_CONF_FILE};
-}
-
-function instantiateGmondConf()
-{
-  # gmond utility library.
-  source ./gmondLib.sh;
- 
-  gmondClusterName=${1};
-
-  if [ "x" != "x${gmondClusterName}" ]
-  then
-
-    createDirectory "${GANGLIA_RUNTIME_DIR}/${gmondClusterName}";
-    createDirectory "${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d";
-    
-    # Always blindly generate the core gmond config - that goes on every box running gmond. 
-    generateGmondCoreConf ${gmondClusterName} > `getGmondCoreConfFileName ${gmondClusterName}`;
-
-    isMasterGmond=${2};
-
-    # Decide whether we want to add on the master or slave gmond config.
-    if [ "0" -eq "${isMasterGmond}" ]
-    then
-      generateGmondSlaveConf ${gmondClusterName} > `getGmondSlaveConfFileName ${gmondClusterName}`;
-    else
-      generateGmondMasterConf ${gmondClusterName} > `getGmondMasterConfFileName ${gmondClusterName}`;
-    fi
-
-    chown -R ${3}:${4} ${GANGLIA_CONF_DIR}/${gmondClusterName}
-
-  else
-    echo "No gmondClusterName passed in, nothing to instantiate";
-  fi
-}
-
-# main()
-
-gmondClusterName=;
-isMasterGmond=0;
-configureGmetad=0;
-owner='root';
-group='root';
-
-while getopts ":c:mto:g:" OPTION
-do
-  case ${OPTION} in
-    c) 
-      gmondClusterName=${OPTARG};
-      ;;
-    m)
-      isMasterGmond=1;
-      ;;
-    t)
-      configureGmetad=1;
-      ;;
-    o)
-      owner=${OPTARG};
-      ;;
-    g)
-      group=${OPTARG};
-      ;;
-    ?)
-      usage;
-      exit 1;
-  esac
-done
-
-# Initialization.
-createDirectory ${GANGLIA_CONF_DIR};
-createDirectory ${GANGLIA_RUNTIME_DIR};
-# So rrdcached can drop its PID files in here.
-chmod -R o+rw ${GANGLIA_RUNTIME_DIR};
-chown ${owner}:${group} ${GANGLIA_CONF_DIR};
-
-if [ -n "${gmondClusterName}" ]
-then
-
-  # Be forgiving of users who pass in -c along with -t (which always takes precedence).
-  if [ "1" -eq "${configureGmetad}" ]
-  then
-    instantiateGmetadConf;
-  else
-    instantiateGmondConf ${gmondClusterName} ${isMasterGmond} ${owner} ${group};
-  fi
-
-elif [ "1" -eq "${configureGmetad}" ]
-then
-  instantiateGmetadConf;
-else
-  usage;
-  exit 2;
-fi
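
Putting the options above together, two representative invocations of the deleted script (owner, group, and cluster name are illustrative values, not defaults from this commit):

    # Master-side gmond config for the "HDPSlaves" cluster:
    ./setupGanglia.sh -c HDPSlaves -m -o root -g hadoop

    # gmetad config on the Ganglia server host; -t takes precedence
    # over any -c passed alongside it, per the comment in main() above:
    ./setupGanglia.sh -t -o root -g hadoop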

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/startGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/startGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/startGmetad.sh
deleted file mode 100644
index b271e06..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/startGmetad.sh
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-# To get access to ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET}.
-source ./rrdcachedLib.sh;
-
-# Before starting gmetad, start rrdcached.
-./startRrdcached.sh;
-
-if [ $? -eq 0 ] 
-then
-    gmetadRunningPid=`getGmetadRunningPid`;
-
-    # Only attempt to start gmetad if there's not already one running.
-    if [ -f "${GMETAD_PID_FILE}" ] && [ -z "${gmetadRunningPid}" ]
-    then
-      rm -f ${GMETAD_PID_FILE}; rm -f /var/lock/subsys/hdp-gmetad
-    fi
-    if [ -z "${gmetadRunningPid}" ]
-    then
-        env RRDCACHED_ADDRESS=${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
-                    ${GMETAD_BIN} --conf=${GMETAD_CONF_FILE} --pid-file=${GMETAD_PID_FILE};
-
-        for i in `seq 0 5`; do
-          gmetadRunningPid=`getGmetadRunningPid`;
-          if [ -n "${gmetadRunningPid}" ]
-          then
-            break;
-          fi
-          sleep 1;
-        done
-
-        if [ -n "${gmetadRunningPid}" ]
-        then
-            echo "Started ${GMETAD_BIN} with PID ${gmetadRunningPid}";
-        else
-            echo "Failed to start ${GMETAD_BIN}";
-            exit 1;
-        fi
-    else
-        echo "${GMETAD_BIN} already running with PID ${gmetadRunningPid}";
-    fi
-else
-    echo "Not starting ${GMETAD_BIN} because starting ${RRDCACHED_BIN} failed.";
-    exit 2;
-fi
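
This script and startGmond.sh below both inline the same pattern: launch the daemon, then poll its PID-lookup helper for up to six seconds before declaring success or failure. A factored-out sketch of that loop — waitForPid is a hypothetical helper, not part of either script:

    # Poll the PID-lookup command $1 once per second, up to $2 tries.
    waitForPid()
    {
        for i in `seq 1 ${2:-6}`; do
            pid=`${1}`
            [ -n "${pid}" ] && { echo "${pid}"; return 0; }
            sleep 1
        done
        return 1
    }
    # e.g.: gmetadRunningPid=`waitForPid getGmetadRunningPid 6`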

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/startGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/startGmond.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/startGmond.sh
deleted file mode 100644
index cdf4fe0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/startGmond.sh
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function startGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-    gmondPidFileName=`getGmondPidFileName ${gmondClusterName}`;
-
-    # Only attempt to start gmond if there's not already one running.
-    if [ -f "${gmondPidFileName}" ] && [ -z "${gmondRunningPid}" ]
-    then
-      rm -f ${gmondPidFileName}
-    fi
-    if [ -z "${gmondRunningPid}" ]
-    then
-      gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
-
-      if [ -e "${gmondCoreConfFileName}" ]
-      then 
-        gmondPidFileName=`getGmondPidFileName ${gmondClusterName}`;
-
-        ${GMOND_BIN} --conf=${gmondCoreConfFileName} --pid-file=${gmondPidFileName};
-
-        for i in `seq 0 5`; do
-          gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-          if [ -n "${gmondRunningPid}" ]
-          then
-            break;
-          fi
-          sleep 1;
-        done
-  
-        if [ -n "${gmondRunningPid}" ]
-        then
-            echo "Started ${GMOND_BIN} for cluster ${gmondClusterName} with PID ${gmondRunningPid}";
-        else
-            echo "Failed to start ${GMOND_BIN} for cluster ${gmondClusterName}";
-            exit 1;
-        fi
-      fi 
-    else
-      echo "${GMOND_BIN} for cluster ${gmondClusterName} already running with PID ${gmondRunningPid}";
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so start 
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        startGmondForCluster ${gmondClusterName};
-    done
-else
-    # Just start the one ${gmondClusterName} that was asked for.
-    startGmondForCluster ${gmondClusterName};
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/startRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/startRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/startRrdcached.sh
deleted file mode 100644
index dc47f39..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/startRrdcached.sh
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-# Only attempt to start rrdcached if there's not already one running.
-if [ -z "${rrdcachedRunningPid}" ]
-then
-    su -s /bin/bash - ${GMETAD_USER} -c "${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
-             -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
-             -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
-             -b ${RRDCACHED_BASE_DIR} -B -t ${RRDCACHED_WRITE_THREADS} \
-             -w ${RRDCACHED_TIMEOUT} -f ${RRDCACHED_FLUSH_TIMEOUT} -z ${RRDCACHED_DELAY} -F"
-
-    # Ideally, we'd use ${RRDCACHED_BIN}'s -s ${WEBSERVER_GROUP} option for 
-    # this, but it sometimes doesn't take effect due to a lack of permissions,
-    # so we perform the operation explicitly to make extra sure.
-    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET};
-    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET};
-
-    # Check to make sure rrdcached actually started up.
-    for i in `seq 0 5`; do
-      rrdcachedRunningPid=`getRrdcachedRunningPid`;
-      if [ -n "${rrdcachedRunningPid}" ]
-        then
-          break;
-      fi
-      sleep 1;
-    done
-
-    if [ -n "${rrdcachedRunningPid}" ]
-    then
-        echo "Started ${RRDCACHED_BIN} with PID ${rrdcachedRunningPid}";
-    else
-        echo "Failed to start ${RRDCACHED_BIN}";
-        exit 1;
-    fi
-
-    #Configure Ganglia Web to work with RRDCached
-    GANGLIA_WEB_CONFIG_FILE=${GANGLIA_WEB_PATH}/conf_default.php
-
-    if [ -f $GANGLIA_WEB_CONFIG_FILE ]
-    then
-      sed -i "s@\$conf\['rrdcached_socket'] =.*@\$conf\['rrdcached_socket'] = \"unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET}\";@" $GANGLIA_WEB_CONFIG_FILE
-      sed -i "s@\$conf\['rrds'] =.*@\$conf\['rrds'] = \"${RRDCACHED_BASE_DIR}\";@" $GANGLIA_WEB_CONFIG_FILE
-      sed -i "s@\$conf\['gmetad_root'] =.*@\$conf\['gmetad_root'] = \"${RRDCACHED_BASE_DIR}\";@" $GANGLIA_WEB_CONFIG_FILE
-    else
-      echo "${GANGLIA_WEB_CONFIG_FILE} can't be found";
-      exit 1;
-    fi
-
-else
-    echo "${RRDCACHED_BIN} already running with PID ${rrdcachedRunningPid}";
-fi
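
Worth noting in startRrdcached.sh: two UNIX sockets are created with different permissions, a mode-664 all-access socket for gmetad and a mode-777 socket limited to the FLUSH, STATS and HELP commands for the web frontend. rrdcached speaks a line-based text protocol on these sockets, so a quick health probe can look like the sketch below (the socket path is illustrative; the real value comes from rrdcachedLib.sh):

import socket

def rrdcached_stats(sock_path="/var/run/ganglia/rrdcached.limited.sock"):
    # STATS is one of the commands the limited-access socket permits
    # (-P FLUSH,STATS,HELP), so it makes a safe liveness probe.
    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        s.connect(sock_path)
        s.sendall(b"STATS\n")
        return s.recv(4096).decode()
    finally:
        s.close()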

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/stopGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/stopGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/stopGmetad.sh
deleted file mode 100644
index 86731d2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/stopGmetad.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-gmetadRunningPid=`getGmetadRunningPid`;
-
-# Only go ahead with the termination if we could find a running PID.
-if [ -n "${gmetadRunningPid}" ]
-then
-    kill -KILL ${gmetadRunningPid};
-    echo "Stopped ${GMETAD_BIN} (with PID ${gmetadRunningPid})";
-fi
-
-# Poll again.
-gmetadRunningPid=`getGmetadRunningPid`;
-
-# Once we've killed gmetad, there should no longer be a running PID.
-if [ -z "${gmetadRunningPid}" ]
-then
-    # It's safe to stop rrdcached now.
-    ./stopRrdcached.sh;
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/stopGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/stopGmond.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/stopGmond.sh
deleted file mode 100644
index 8824d2a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/stopGmond.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function stopGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
-    # Only go ahead with the termination if we could find a running PID.
-    if [ -n "${gmondRunningPid}" ]
-    then
-      kill -KILL ${gmondRunningPid};
-      echo "Stopped ${GMOND_BIN} for cluster ${gmondClusterName} (with PID ${gmondRunningPid})";
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so stop
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        stopGmondForCluster ${gmondClusterName};
-    done
-else
-    stopGmondForCluster ${gmondClusterName};
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/stopRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/stopRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/stopRrdcached.sh
deleted file mode 100644
index bffc9e5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/stopRrdcached.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-# Only go ahead with the termination if we could find a running PID.
-if [ -n "${rrdcachedRunningPid}" ]
-then
-    kill -TERM ${rrdcachedRunningPid};
-    # ${RRDCACHED_BIN} takes a few seconds to drain its buffers, so wait 
-    # until we're sure it's well and truly dead. 
-    #
-    # Without this, an immediately following startRrdcached.sh won't do
-    # anything, because it still sees this soon-to-die instance alive,
-    # and the net result is that after a few seconds, there's no
-    # ${RRDCACHED_BIN} running on the box anymore.
-    sleep 5;
-    echo "Stopped ${RRDCACHED_BIN} (with PID ${rrdcachedRunningPid})";
-fi 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/teardownGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/teardownGanglia.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/teardownGanglia.sh
deleted file mode 100644
index 8740c27..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/teardownGanglia.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants, utilities etc.
-source ./gangliaLib.sh;
-
-# Undo what we did while setting up Ganglia on this box.
-rm -rf ${GANGLIA_CONF_DIR};
-rm -rf ${GANGLIA_RUNTIME_DIR};

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/functions.py
deleted file mode 100644
index b02e688..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/functions.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-from resource_management import *
-
-
-def turn_off_autostart(service):
-  if System.get_instance().os_family == "ubuntu":
-    Execute(format("update-rc.d {service} disable"),
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-    )
-    Execute(format("service {service} stop"), ignore_failures=True)
-    Execute(format("echo 'manual' > /etc/init/{service}.override")) # disable the upstart job
-  else:
-    Execute(format("chkconfig {service} off"),
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-    )
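
turn_off_autostart() covers both service managers in play: on Ubuntu it disables the SysV links with update-rc.d and writes an upstart override file, while everywhere else it falls back to chkconfig. The monitor and server scripts below call it once per daemon at install time, for example:

functions.turn_off_autostart(params.gmond_service_name)
functions.turn_off_autostart("gmetad")  # the package installs both daemons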

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia.py
deleted file mode 100644
index 69fde27..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia.py
+++ /dev/null
@@ -1,97 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-import os
-
-
-def groups_and_users():
-  import params
-
-def config():
-  import params
-
-  shell_cmds_dir = params.ganglia_shell_cmds_dir
-  shell_files = ['checkGmond.sh', 'checkRrdcached.sh', 'gmetadLib.sh',
-                 'gmondLib.sh', 'rrdcachedLib.sh',
-                 'setupGanglia.sh', 'startGmetad.sh', 'startGmond.sh',
-                 'startRrdcached.sh', 'stopGmetad.sh',
-                 'stopGmond.sh', 'stopRrdcached.sh', 'teardownGanglia.sh']
-  Directory(shell_cmds_dir,
-            owner="root",
-            group="root",
-            recursive=True
-  )
-  init_file("gmetad")
-  init_file("gmond")
-  for sh_file in shell_files:
-    shell_file(sh_file)
-  for conf_file in ['gangliaClusters.conf', 'gangliaEnv.sh', 'gangliaLib.sh']:
-    ganglia_TemplateConfig(conf_file)
-
-
-def init_file(name):
-  import params
-
-  File("/etc/init.d/hdp-" + name,
-       content=StaticFile(name + ".init"),
-       mode=0755
-  )
-
-
-def shell_file(name):
-  import params
-
-  File(params.ganglia_shell_cmds_dir + os.sep + name,
-       content=StaticFile(name),
-       mode=0755
-  )
-
-
-def ganglia_TemplateConfig(name, mode=0755, tag=None):
-  import params
-
-  TemplateConfig(format("{params.ganglia_shell_cmds_dir}/{name}"),
-                 owner="root",
-                 group="root",
-                 template_tag=tag,
-                 mode=mode
-  )
-
-
-def generate_daemon(ganglia_service,
-                    name=None,
-                    role=None,
-                    owner=None,
-                    group=None):
-  import params
-
-  cmd = ""
-  if ganglia_service == "gmond":
-    if role == "server":
-      cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -m -o {owner} -g {group}"
-    else:
-      cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -o {owner} -g {group}"
-  elif ganglia_service == "gmetad":
-    cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -t -o {owner} -g {group}"
-  else:
-    raise Fail("Unexpected ganglia service")
-  Execute(format(cmd),
-          path=[params.ganglia_shell_cmds_dir, "/usr/sbin",
-                "/sbin:/usr/local/bin", "/bin", "/usr/bin"]
-  )
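
generate_daemon() is a thin wrapper over setupGanglia.sh: for gmond, the "server" role adds the -m (master) flag, and gmetad always gets -t. With illustrative values, the gmond server branch expands to a command like setupGanglia.sh -c HDPNameNode -m -o root -g hadoop, run with /usr/libexec/hdp/ganglia on the search path (the group shown is illustrative; the callers pass params.user_group).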

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_monitor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_monitor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_monitor.py
deleted file mode 100644
index 05ceaff..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_monitor.py
+++ /dev/null
@@ -1,243 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import sys
-import os
-from os import path
-from resource_management import *
-from ganglia import generate_daemon
-import ganglia
-import functions
-import ganglia_monitor_service
-
-
-class GangliaMonitor(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-    self.configure(env)
-
-    functions.turn_off_autostart(params.gmond_service_name)
-    functions.turn_off_autostart("gmetad") # since the package is installed as well
-
-  def start(self, env, rolling_restart=False):
-    import params
-    env.set_params(params)
-    self.configure(env)
-    ganglia_monitor_service.monitor("start")
-
-  def stop(self, env, rolling_restart=False):
-    ganglia_monitor_service.monitor("stop")
-
-
-  def status(self, env):
-    import status_params
-    pid_file_name = 'gmond.pid'
-    pid_file_count = 0
-    pid_dir = status_params.pid_dir
-    # Recursively check all existing gmond pid files
-    for cur_dir, subdirs, files in os.walk(pid_dir):
-      for file_name in files:
-        if file_name == pid_file_name:
-          pid_file = os.path.join(cur_dir, file_name)
-          check_process_status(pid_file)
-          pid_file_count += 1
-    if pid_file_count == 0: # If no pid file is present at all
-      raise ComponentIsNotRunning()
-
-
-  def configure(self, env):
-    import params
-
-    ganglia.groups_and_users()
-
-    Directory(params.ganglia_conf_dir,
-              owner="root",
-              group=params.user_group,
-              recursive=True
-    )
-
-    ganglia.config()
-
-    self.generate_slave_configs()
-
-    Directory(path.join(params.ganglia_dir, "conf.d"),
-              owner="root",
-              group=params.user_group
-    )
-
-    File(path.join(params.ganglia_dir, "conf.d/modgstatus.conf"),
-         owner="root",
-         group=params.user_group
-    )
-    File(path.join(params.ganglia_dir, "conf.d/multicpu.conf"),
-         owner="root",
-         group=params.user_group
-    )
-    File(path.join(params.ganglia_dir, "gmond.conf"),
-         owner="root",
-         group=params.user_group
-    )
-
-    if params.is_ganglia_server_host:
-      self.generate_master_configs()
-
-      if len(params.gmond_apps) != 0:
-        self.generate_app_configs()
-        pass
-      pass
-
-
-  def generate_app_configs(self):
-    import params
-
-    for gmond_app in params.gmond_apps:
-      generate_daemon("gmond",
-                      name=gmond_app[0],
-                      role="server",
-                      owner="root",
-                      group=params.user_group)
-      generate_daemon("gmond",
-                      name = gmond_app[0],
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-    pass
-
-  def generate_slave_configs(self):
-    import params
-
-    generate_daemon("gmond",
-                    name = "HDPSlaves",
-                    role = "monitor",
-                    owner = "root",
-                    group = params.user_group)
-
-
-  def generate_master_configs(self):
-    import params
-
-    if params.has_namenodes:
-      generate_daemon("gmond",
-                      name = "HDPNameNode",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_jobtracker:
-      generate_daemon("gmond",
-                      name = "HDPJobTracker",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_hbase_masters:
-      generate_daemon("gmond",
-                      name = "HDPHBaseMaster",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_resourcemanager:
-      generate_daemon("gmond",
-                      name = "HDPResourceManager",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_nodemanager:
-      generate_daemon("gmond",
-                      name = "HDPNodeManager",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_historyserver:
-      generate_daemon("gmond",
-                      name = "HDPHistoryServer",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_slaves:
-      generate_daemon("gmond",
-                      name = "HDPDataNode",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_tasktracker:
-      generate_daemon("gmond",
-                      name = "HDPTaskTracker",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_hbase_rs:
-      generate_daemon("gmond",
-                      name = "HDPHBaseRegionServer",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_nimbus_server:
-      generate_daemon("gmond",
-                      name = "HDPNimbus",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_supervisor_server:
-      generate_daemon("gmond",
-                      name = "HDPSupervisor",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_kafka_broker:
-      generate_daemon("gmond",
-                      name = "HDPKafka",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_flume:
-      generate_daemon("gmond",
-                      name = "HDPFlumeServer",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_journalnode:
-      generate_daemon("gmond",
-                      name = "HDPJournalNode",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    generate_daemon("gmond",
-                    name = "HDPSlaves",
-                    role = "server",
-                    owner = "root",
-                    group = params.user_group)
-
-
-if __name__ == "__main__":
-  GangliaMonitor().execute()
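
The status() implementation above follows from how this package runs gmond: one instance per configured cluster, each writing its own gmond.pid under the runtime directory, so the check walks the whole tree and raises ComponentIsNotRunning only when no pid file is found at all. With an illustrative runtime directory, the expected layout looks like /var/run/ganglia/hdp/HDPSlaves/gmond.pid, /var/run/ganglia/hdp/HDPNameNode/gmond.pid, and so on.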

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_monitor_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_monitor_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_monitor_service.py
deleted file mode 100644
index cf7a4b1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_monitor_service.py
+++ /dev/null
@@ -1,27 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-
-
-def monitor(action=None):# 'start' or 'stop'
-  Execute(
-    format(
-      "service hdp-gmond {action} >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1"),
-    path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_server.py
deleted file mode 100644
index 3492fff..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_server.py
+++ /dev/null
@@ -1,123 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import sys
-import os
-from os import path
-from resource_management import *
-from ganglia import generate_daemon
-import ganglia
-import functions
-import ganglia_server_service
-
-
-class GangliaServer(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-    self.configure(env)
-    
-    functions.turn_off_autostart(params.gmond_service_name) # since the package is installed as well
-    functions.turn_off_autostart("gmetad")
-
-  def start(self, env, rolling_restart=False):
-    import params
-    env.set_params(params)
-    self.configure(env)
-    ganglia_server_service.server("start")
-
-  def stop(self, env, rolling_restart=False):
-    import params
-
-    env.set_params(params)
-    ganglia_server_service.server("stop")
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{pid_dir}/gmetad.pid")
-    # Recursively check all existing gmetad pid files
-    check_process_status(pid_file)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    ganglia.groups_and_users()
-    ganglia.config()
-
-    generate_daemon("gmetad",
-                    name = "gmetad",
-                    role = "server",
-                    owner = "root",
-                    group = params.user_group)
-
-    change_permission()
-    server_files()
-    File(path.join(params.ganglia_dir, "gmetad.conf"),
-         owner="root",
-         group=params.user_group
-    )
-
-
-def change_permission():
-  import params
-
-  Directory(os.path.abspath(os.path.join(params.ganglia_runtime_dir, "..")),
-            mode=0755,
-            recursive=True
-  )
-  Directory(params.dwoo_path,
-            mode=0755,
-            recursive=True
-  )
-  Execute(format("chown -R {web_user} {dwoo_path}"))
-
-def server_files():
-  import params
-
-  rrd_py_path = params.rrd_py_path
-  Directory(rrd_py_path,
-            recursive=True
-  )
-  rrd_py_file_path = path.join(rrd_py_path, "rrd.py")
-  TemplateConfig(rrd_py_file_path,
-                 owner="root",
-                 group="root",
-                 mode=0755
-  )
-  rrd_file_owner = params.gmetad_user
-
-  Directory(params.rrdcached_base_dir,
-            owner=rrd_file_owner,
-            group=rrd_file_owner,
-            mode=0755,
-            recursive=True
-  )
-  
-  if System.get_instance().os_family in ["ubuntu","suse"]:
-    File( params.ganglia_apache_config_file,
-      content = Template("ganglia.conf.j2"),
-      mode = 0644
-    )
-
-
-if __name__ == "__main__":
-  GangliaServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_server_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_server_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_server_service.py
deleted file mode 100644
index b93e3f8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/ganglia_server_service.py
+++ /dev/null
@@ -1,27 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-
-
-def server(action=None):# 'start' or 'stop'
-  command = "service hdp-gmetad {action} >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1"
-  Execute(format(command),
-          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-  )
-  MonitorWebserver("restart")

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/params.py
deleted file mode 100644
index 5fe6335..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/params.py
+++ /dev/null
@@ -1,164 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-from resource_management.core.system import System
-import os
-
-config = Script.get_config()
-
-user_group = config['configurations']['cluster-env']["user_group"]
-ganglia_conf_dir = default("/configurations/ganglia-env/ganglia_conf_dir", "/etc/ganglia/hdp")
-ganglia_dir = "/etc/ganglia"
-ganglia_runtime_dir = config['configurations']['ganglia-env']["ganglia_runtime_dir"]
-ganglia_shell_cmds_dir = "/usr/libexec/hdp/ganglia"
-
-gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
-gmond_user = config['configurations']['ganglia-env']["gmond_user"]
-
-gmond_add_clusters_str = default("/configurations/ganglia-env/additional_clusters", None)
-if gmond_add_clusters_str and gmond_add_clusters_str.isspace():
-  gmond_add_clusters_str = None
-
-gmond_app_strs = [] if gmond_add_clusters_str is None else gmond_add_clusters_str.split(',')
-gmond_apps = []
-
-for x in gmond_app_strs:
-  a,b = x.strip().split(':')
-  gmond_apps.append((a.strip(),b.strip()))
-
-if System.get_instance().os_family == "ubuntu":
-  gmond_service_name = "ganglia-monitor"
-  modules_dir = "/usr/lib/ganglia"
-else:
-  gmond_service_name = "gmond"
-  modules_dir = "/usr/lib64/ganglia"
-
-webserver_group = "apache"
-rrdcached_base_dir = config['configurations']['ganglia-env']["rrdcached_base_dir"]
-rrdcached_timeout = default("/configurations/ganglia-env/rrdcached_timeout", 3600)
-rrdcached_flush_timeout = default("/configurations/ganglia-env/rrdcached_flush_timeout", 7200)
-rrdcached_delay = default("/configurations/ganglia-env/rrdcached_delay", 1800)
-rrdcached_write_threads = default("/configurations/ganglia-env/rrdcached_write_threads", 4)
-
-ganglia_server_host = config["clusterHostInfo"]["ganglia_server_host"][0]
-
-hostname = config["hostname"]
-namenode_host = set(default("/clusterHostInfo/namenode_host", []))
-jtnode_host = set(default("/clusterHostInfo/jtnode_host", []))
-rm_host = set(default("/clusterHostInfo/rm_host", []))
-hs_host = set(default("/clusterHostInfo/hs_host", []))
-hbase_master_hosts = set(default("/clusterHostInfo/hbase_master_hosts", []))
-# datanodes are marked as slave_hosts
-slave_hosts = set(default("/clusterHostInfo/slave_hosts", []))
-tt_hosts = set(default("/clusterHostInfo/mapred_tt_hosts", []))
-nm_hosts = set(default("/clusterHostInfo/nm_hosts", []))
-hbase_rs_hosts = set(default("/clusterHostInfo/hbase_rs_hosts", []))
-flume_hosts = set(default("/clusterHostInfo/flume_hosts", []))
-jn_hosts = set(default("/clusterHostInfo/journalnode_hosts", []))
-nimbus_server_hosts = set(default("/clusterHostInfo/nimbus_hosts", []))
-supervisor_server_hosts = set(default("/clusterHostInfo/supervisor_hosts", []))
-kafka_broker_hosts =  set(default("/clusterHostInfo/kafka_broker_hosts", []))
-kafka_ganglia_port = default("/configurations/kafka-broker/kafka.ganglia.metrics.port", 8671)
-
-pure_slave = not hostname in (namenode_host | jtnode_host | rm_host | hs_host | \
-                              hbase_master_hosts | slave_hosts | tt_hosts | hbase_rs_hosts | \
-                              flume_hosts | nm_hosts | jn_hosts | nimbus_server_hosts | \
-                              supervisor_server_hosts)
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-is_tasktracker = hostname in tt_hosts
-is_nodemanager = hostname in nm_hosts
-is_hbase_rs = hostname in hbase_rs_hosts
-is_flume = hostname in flume_hosts
-is_ganglia_server_host = (hostname == ganglia_server_host)
-is_jn_host = hostname in jn_hosts
-is_nimbus_host = hostname in nimbus_server_hosts
-is_supervisor_host = hostname in supervisor_server_hosts
-
-has_namenodes = not len(namenode_host) == 0
-has_jobtracker = not len(jtnode_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_historyserver = not len(hs_host) == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_slaves = not len(slave_hosts) == 0
-has_tasktracker = not len(tt_hosts) == 0
-has_nodemanager = not len(nm_hosts) == 0
-has_hbase_rs = not len(hbase_rs_hosts) == 0
-has_flume = not len(flume_hosts) == 0
-has_journalnode = not len(jn_hosts) == 0
-has_nimbus_server = not len(nimbus_server_hosts) == 0
-has_supervisor_server = not len(supervisor_server_hosts) == 0
-has_kafka_broker = not len(kafka_broker_hosts) == 0
-
-ganglia_cluster_names = {
-  "jn_hosts": [("HDPJournalNode", 8654)],
-  "flume_hosts": [("HDPFlumeServer", 8655)],
-  "hbase_rs_hosts": [("HDPHBaseRegionServer", 8656)],
-  "nm_hosts": [("HDPNodeManager", 8657)],
-  "mapred_tt_hosts": [("HDPTaskTracker", 8658)],
-  "slave_hosts": [("HDPDataNode", 8659)],
-  "namenode_host": [("HDPNameNode", 8661)],
-  "jtnode_host": [("HDPJobTracker", 8662)],
-  "hbase_master_hosts": [("HDPHBaseMaster", 8663)],
-  "rm_host": [("HDPResourceManager", 8664)],
-  "hs_host": [("HDPHistoryServer", 8666)],
-  "nimbus_hosts": [("HDPNimbus", 8649)],
-  "supervisor_hosts": [("HDPSupervisor", 8650)],
-  "kafka_broker_hosts": [("HDPKafka", kafka_ganglia_port)],
-  "ReservedPort1": [("ReservedPort1", 8667)],
-  "ReservedPort2": [("ReservedPort2", 8668)],
-  "ReservedPort3": [("ReservedPort3", 8669)]
-}
-
-ganglia_clusters = [("HDPSlaves", 8660)]
-
-for key in ganglia_cluster_names:
-  property_name = format("/clusterHostInfo/{key}")
-  hosts = set(default(property_name, []))
-  if not len(hosts) == 0:
-    for x in ganglia_cluster_names[key]:
-      ganglia_clusters.append(x)
-
-if len(gmond_apps) > 0:
-  for gmond_app in gmond_apps:
-    ganglia_clusters.append(gmond_app)
-
-ganglia_apache_config_file = "/etc/apache2/conf.d/ganglia.conf"
-ganglia_web_path="/var/www/html/ganglia"
-if System.get_instance().os_family == "suse":
-  rrd_py_path = '/srv/www/cgi-bin'
-  dwoo_path = '/var/lib/ganglia-web/dwoo'
-  web_user = "wwwrun"
-  # for upgrade purposes as path to ganglia was changed
-  if not os.path.exists(ganglia_web_path):
-    ganglia_web_path='/srv/www/htdocs/ganglia'
-
-elif  System.get_instance().os_family == "redhat":
-  rrd_py_path = '/var/www/cgi-bin'
-  dwoo_path = '/var/lib/ganglia/dwoo'
-  web_user = "apache"
-elif  System.get_instance().os_family == "ubuntu":
-  rrd_py_path = '/usr/lib/cgi-bin'
-  ganglia_web_path = '/usr/share/ganglia-webfrontend'
-  dwoo_path = '/var/lib/ganglia/dwoo'
-  web_user = "www-data"
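
The additional_clusters parsing near the top of params.py accepts a comma-separated list of name:port pairs; each parsed pair lands in gmond_apps and is later appended to ganglia_clusters next to the built-in HDP clusters. A runnable illustration of that parsing, with a made-up property value:

# Illustrative value for /configurations/ganglia-env/additional_clusters
gmond_add_clusters_str = "AppCluster1:8670, AppCluster2:8671"

gmond_apps = []
for x in gmond_add_clusters_str.split(','):
    a, b = x.strip().split(':')
    gmond_apps.append((a.strip(), b.strip()))

print(gmond_apps)  # [('AppCluster1', '8670'), ('AppCluster2', '8671')]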

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/status_params.py
deleted file mode 100644
index 0c69ca9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/status_params.py
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-pid_dir = config['configurations']['ganglia-env']['ganglia_runtime_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/ganglia.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/ganglia.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/ganglia.conf.j2
deleted file mode 100644
index a08fb31..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/ganglia.conf.j2
+++ /dev/null
@@ -1,34 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-Alias /ganglia "{{ganglia_web_path}}"
-
-<Directory "{{ganglia_web_path}}">
-#  SSLRequireSSL
-   Options None
-   AllowOverride None
-   Order allow,deny
-   Allow from all
-#  Order deny,allow
-#  Deny from all
-#  Allow from 127.0.0.1
-#  AuthName "Ganglia Access"
-#  AuthType Basic
-#  AuthUserFile /etc/ganglia/htpasswd.users
-#  Require valid-user
-</Directory>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/gangliaClusters.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/gangliaClusters.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/gangliaClusters.conf.j2
deleted file mode 100644
index ffb4e84..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/gangliaClusters.conf.j2
+++ /dev/null
@@ -1,43 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#########################################################
-### ClusterName           GmondMasterHost   GmondPort ###
-#########################################################
-
-{% for x in ganglia_clusters %}
-    {{ x[0] }}       	  {{ganglia_server_host}}  {{ x[1] }}
-{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/gangliaEnv.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/gangliaEnv.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/gangliaEnv.sh.j2
deleted file mode 100644
index 0b68623..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/gangliaEnv.sh.j2
+++ /dev/null
@@ -1,46 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Unix users and groups for the binaries we start up.
-RRD_ROOTDIR={{rrdcached_base_dir}}
-GMETAD_USER={{gmetad_user}};
-GMOND_USER={{gmond_user}};
-WEBSERVER_GROUP={{webserver_group}};
-MODULES_DIR={{modules_dir}}
-GANGLIA_WEB_PATH={{ganglia_web_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/gangliaLib.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/gangliaLib.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/gangliaLib.sh.j2
deleted file mode 100644
index 6c24c7f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/gangliaLib.sh.j2
+++ /dev/null
@@ -1,85 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-GANGLIA_CONF_DIR={{ganglia_conf_dir}};
-GANGLIA_RUNTIME_DIR={{ganglia_runtime_dir}};
-RRDCACHED_BASE_DIR={{rrdcached_base_dir}};
-RRDCACHED_WRITE_THREADS={{rrdcached_write_threads}}
-RRDCACHED_TIMEOUT={{rrdcached_timeout}}
-RRDCACHED_FLUSH_TIMEOUT={{rrdcached_flush_timeout}}
-RRDCACHED_DELAY={{rrdcached_delay}}
-
-# This file contains all the info about each Ganglia Cluster in our Grid.
-GANGLIA_CLUSTERS_CONF_FILE=./gangliaClusters.conf;
-
-function createDirectory()
-{
-    directoryPath=${1};
-
-    if [ "x" != "x${directoryPath}" ]
-    then
-        mkdir -p ${directoryPath};
-    fi
-}
-
-function getGangliaClusterInfo()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # Fetch the particular entry for ${clusterName} from ${GANGLIA_CLUSTERS_CONF_FILE}.
-        awk -v clusterName=${clusterName} '($1 !~ /^#/) && ($1 == clusterName)' ${GANGLIA_CLUSTERS_CONF_FILE};
-    else
-        # Spit out all the non-comment, non-empty lines from ${GANGLIA_CLUSTERS_CONF_FILE}.
-        awk '($1 !~ /^#/) && (NF)' ${GANGLIA_CLUSTERS_CONF_FILE};
-    fi
-}
-
-function getConfiguredGangliaClusterNames()
-{
-  # Find all the subdirectories in ${GANGLIA_CONF_DIR} and extract only 
-  # the subdirectory name from each.
-  if [ -e ${GANGLIA_CONF_DIR} ]
-  then  
-    find ${GANGLIA_CONF_DIR} -maxdepth 1 -mindepth 1 -type d | xargs -n1 basename;
-  fi
-}


[30/37] ambari git commit: AMBARI-8746: Common Services: Refactor HDP 2.0.6 HIVE, OOZIE services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/startHiveserver2.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/startHiveserver2.sh.j2 b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/startHiveserver2.sh.j2
new file mode 100644
index 0000000..70b418c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/templates/startHiveserver2.sh.j2
@@ -0,0 +1,24 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+HIVE_SERVER2_OPTS=" -hiveconf hive.log.file=hiveserver2.log -hiveconf hive.log.dir=$5"
+HIVE_CONF_DIR=$4 {{hive_bin}}/hiveserver2 -hiveconf hive.metastore.uris=" " ${HIVE_SERVER2_OPTS} > $1 2> $2 &
+echo $!|cat>$3
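
For readers tracing the template above: it takes five positional arguments: $1 the stdout log, $2 the stderr log, $3 the pid file, $4 the configuration directory (HIVE_CONF_DIR), and $5 the hive.log.dir. A minimal Python sketch of how a caller might invoke the rendered script; the paths here are hypothetical, not taken from the commit:

    import subprocess

    subprocess.call([
        "/bin/bash", "/var/lib/ambari-agent/tmp/start_hiveserver2_script",  # hypothetical rendered path
        "/var/log/hive/hive-server2.out",  # $1 -> stdout log
        "/var/log/hive/hive-server2.err",  # $2 -> stderr log
        "/var/run/hive/hive-server2.pid",  # $3 -> pid file
        "/etc/hive/conf.server",           # $4 -> HIVE_CONF_DIR
        "/var/log/hive",                   # $5 -> hive.log.dir
    ])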

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/alerts.json b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/alerts.json
new file mode 100644
index 0000000..32d4238
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/alerts.json
@@ -0,0 +1,42 @@
+{
+  "OOZIE": {
+    "service": [],
+    "OOZIE_SERVER": [
+      {
+        "name": "oozie_server_webui",
+        "label": "Oozie Server Web UI",
+        "description": "This host-level alert is triggered if the Oozie server Web UI is unreachable.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{oozie-site/oozie.base.url}}/oozie"
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.4f} seconds"
+            },
+            "warning":{
+              "text": "HTTP {0} response in {2:.4f} seconds"
+            },
+            "critical": {
+              "text": "Connection failed to {1}"
+            }
+          }
+        }
+      },
+      {
+        "name": "oozie_server_status",
+        "label": "Oozie Server Status",
+        "description": "This host-level alert is triggered if the Oozie server cannot be determined to be up and responding to client requests.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "SCRIPT",
+          "path": "HDP/2.0.6/services/OOZIE/package/files/alert_check_oozie_server.py"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
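
The reporting entries above are Python-style positional format strings. A small illustration (not part of the commit) of how they render; the argument order of HTTP status, URL, and response seconds is inferred from the placeholders:

    # {0} = HTTP status, {1} = URL, {2:.4f} = response time in seconds (inferred)
    ok_text = "HTTP {0} response in {2:.4f} seconds"
    print(ok_text.format(200, "http://oozie-host:11000/oozie", 0.0312))
    # prints: HTTP 200 response in 0.0312 seconds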

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/configuration/oozie-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/configuration/oozie-env.xml b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/configuration/oozie-env.xml
new file mode 100644
index 0000000..65665bf
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/configuration/oozie-env.xml
@@ -0,0 +1,139 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>oozie_user</name>
+    <value>oozie</value>
+    <property-type>USER</property-type>
+    <description>Oozie User.</description>
+  </property>
+  <property>
+    <name>oozie_database</name>
+    <value>New Derby Database</value>
+    <description>Oozie Server Database.</description>
+  </property>
+  <property>
+    <name>oozie_derby_database</name>
+    <value>Derby</value>
+    <description>Oozie Derby Database</description>
+  </property>
+  <property>
+    <name>oozie_data_dir</name>
+    <value>/hadoop/oozie/data</value>
+    <description>Data directory in which the Oozie DB exists</description>
+  </property>
+  <property>
+    <name>oozie_log_dir</name>
+    <value>/var/log/oozie</value>
+    <description>Directory for oozie logs</description>
+  </property>
+  <property>
+    <name>oozie_pid_dir</name>
+    <value>/var/run/oozie</value>
+    <description>Directory in which the pid files for oozie reside.</description>
+  </property>
+  <property>
+    <name>oozie_admin_port</name>
+    <value>11001</value>
+    <description>The admin port the Oozie server runs on.</description>
+  </property>
+  <property>
+    <name>oozie_heapsize</name>
+    <value>2048</value>
+    <description>Oozie heap size.</description>
+  </property>
+  <property>
+    <name>oozie_permsize</name>
+    <value>256</value>
+    <description>Oozie permanent generation size.</description>
+  </property>
+
+  <!-- oozie-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for oozie-env.sh file</description>
+    <value>
+#!/bin/bash
+
+if [ -d "/usr/lib/bigtop-tomcat" ]; then
+  export OOZIE_CONFIG=${OOZIE_CONFIG:-/etc/oozie/conf}
+  export CATALINA_BASE=${CATALINA_BASE:-{{oozie_server_dir}}}
+  export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}
+  export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat
+fi
+
+#Set JAVA HOME
+export JAVA_HOME={{java_home}}
+
+export JRE_HOME=${JAVA_HOME}
+
+# Set Oozie specific environment variables here.
+
+# Settings for the Embedded Tomcat that runs Oozie
+# Java System properties for Oozie should be specified in this variable
+#
+export CATALINA_OPTS="$CATALINA_OPTS -Xmx{{oozie_heapsize}} -XX:MaxPermSize={{oozie_permsize}}"
+
+# Oozie configuration file to load from Oozie configuration directory
+#
+# export OOZIE_CONFIG_FILE=oozie-site.xml
+
+# Oozie logs directory
+#
+export OOZIE_LOG={{oozie_log_dir}}
+
+# Oozie pid directory
+#
+export CATALINA_PID={{pid_file}}
+
+#Location of the data for oozie
+export OOZIE_DATA={{oozie_data_dir}}
+
+# Oozie Log4J configuration file to load from Oozie configuration directory
+#
+# export OOZIE_LOG4J_FILE=oozie-log4j.properties
+
+# Reload interval of the Log4J configuration file, in seconds
+#
+# export OOZIE_LOG4J_RELOAD=10
+
+# The port Oozie server runs
+#
+export OOZIE_HTTP_PORT={{oozie_server_port}}
+
+# The admin port Oozie server runs
+#
+export OOZIE_ADMIN_PORT={{oozie_server_admin_port}}
+
+# The host name Oozie server runs on
+#
+# export OOZIE_HTTP_HOSTNAME=`hostname -f`
+
+# The base URL for callback URLs to Oozie
+#
+# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
+export JAVA_LIBRARY_PATH={{hadoop_lib_home}}/native/Linux-amd64-64
+    </value>
+  </property>
+
+</configuration>
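
The content property above is a jinja template that Ambari renders with cluster parameters before writing oozie-env.sh (see the InlineTemplate call in oozie.py later in this commit). A minimal sketch of the rendering step for one line of the template, assuming illustrative values rather than the real ones from oozie-env:

    from jinja2 import Template

    line = Template('export CATALINA_OPTS="$CATALINA_OPTS '
                    '-Xmx{{oozie_heapsize}} -XX:MaxPermSize={{oozie_permsize}}"')
    print(line.render(oozie_heapsize='2048m', oozie_permsize='256m'))
    # prints: export CATALINA_OPTS="$CATALINA_OPTS -Xmx2048m -XX:MaxPermSize=256m"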

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/configuration/oozie-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/configuration/oozie-log4j.xml b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/configuration/oozie-log4j.xml
new file mode 100644
index 0000000..7f7158f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/configuration/oozie-log4j.xml
@@ -0,0 +1,97 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. See accompanying LICENSE file.
+#
+
+# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
+# XLogService sets its value to '${oozie.home}/logs'
+
+log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH
+log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
+log4j.appender.oozie.Append=true
+log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n
+
+log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
+log4j.appender.oozieops.Append=true
+log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
+log4j.appender.oozieinstrumentation.Append=true
+log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
+log4j.appender.oozieaudit.Append=true
+log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
+log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
+log4j.appender.openjpa.Append=true
+log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
+log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.logger.openjpa=INFO, openjpa
+log4j.logger.oozieops=INFO, oozieops
+log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
+log4j.logger.oozieaudit=ALL, oozieaudit
+log4j.logger.org.apache.oozie=INFO, oozie
+log4j.logger.org.apache.hadoop=WARN, oozie
+log4j.logger.org.mortbay=WARN, oozie
+log4j.logger.org.hsqldb=WARN, oozie
+log4j.logger.org.apache.hadoop.security.authentication.server=INFO, oozie
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/configuration/oozie-site.xml b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/configuration/oozie-site.xml
new file mode 100644
index 0000000..e72a8be
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/configuration/oozie-site.xml
@@ -0,0 +1,312 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+        
+       http://www.apache.org/licenses/LICENSE-2.0
+  
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<configuration supports_final="true">
+
+  <!--
+      Refer to the oozie-default.xml file for the complete list of
+      Oozie configuration properties and their default values.
+  -->
+  <property>
+    <name>oozie.base.url</name>
+    <value>http://localhost:11000/oozie</value>
+    <description>Base Oozie URL.</description>
+  </property>
+
+  <property>
+    <name>oozie.system.id</name>
+    <value>oozie-${user.name}</value>
+    <description>
+      The Oozie system ID.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.systemmode</name>
+    <value>NORMAL</value>
+    <description>
+      System mode for Oozie at startup.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.AuthorizationService.security.enabled</name>
+    <value>true</value>
+    <description>
+      Specifies whether security (user name/admin role) is enabled or not.
+      If disabled any user can manage Oozie system and manage any job.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.PurgeService.older.than</name>
+    <value>30</value>
+    <description>
+      Jobs older than this value, in days, will be purged by the PurgeService.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.PurgeService.purge.interval</name>
+    <value>3600</value>
+    <description>
+      Interval at which the purge service will run, in seconds.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.CallableQueueService.queue.size</name>
+    <value>1000</value>
+    <description>Max callable queue size</description>
+  </property>
+
+  <property>
+    <name>oozie.service.CallableQueueService.threads</name>
+    <value>10</value>
+    <description>Number of threads used for executing callables</description>
+  </property>
+
+  <property>
+    <name>oozie.service.CallableQueueService.callable.concurrency</name>
+    <value>3</value>
+    <description>
+      Maximum concurrency for a given callable type.
+      Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc).
+      Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
+      All commands that use action executors (action-start, action-end, action-kill and action-check) use
+      the action type as the callable type.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.coord.normal.default.timeout</name>
+    <value>120</value>
+    <description>Default timeout for a coordinator action input check (in minutes) for a normal job.
+      -1 means infinite timeout
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.db.schema.name</name>
+    <value>oozie</value>
+    <description>
+      Oozie Database Name
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.authentication.type</name>
+    <value>simple</value>
+    <description>
+      Authentication used for Oozie HTTP endpoint, the supported values are: simple | kerberos |
+      #AUTHENTICATION_HANDLER_CLASSNAME#.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.WorkflowAppService.system.libpath</name>
+    <value>/user/${user.name}/share/lib</value>
+    <description>
+      System library path to use for workflow applications.
+      This path is added to workflow application if their job properties sets
+      the property 'oozie.use.system.libpath' to true.
+    </description>
+  </property>
+
+  <property>
+    <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
+    <value>false</value>
+    <description>
+      If set to true, submissions of MapReduce and Pig jobs will include
+      automatically the system library path, thus not requiring users to
+      specify where the Pig JAR files are. Instead, the ones from the system
+      library path are used.
+    </description>
+  </property>
+  <property>
+    <name>oozie.authentication.kerberos.name.rules</name>
+    <value>
+      RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/
+      RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/
+      RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
+      RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
+      DEFAULT
+    </value>
+    <description>The mapping from kerberos principal names to local OS user names.</description>
+  </property>
+  <property>
+    <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
+    <value>*=/etc/hadoop/conf</value>
+    <description>
+      Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
+      the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
+      used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
+      the relevant Hadoop *-site.xml files. If the path is relative, it is looked up within
+      the Oozie configuration directory; the path can also be absolute (i.e. pointing
+      to Hadoop client conf/ directories in the local filesystem).
+    </description>
+  </property>
+  <property>
+    <name>oozie.service.ActionService.executor.ext.classes</name>
+    <value>
+      org.apache.oozie.action.email.EmailActionExecutor,
+      org.apache.oozie.action.hadoop.HiveActionExecutor,
+      org.apache.oozie.action.hadoop.ShellActionExecutor,
+      org.apache.oozie.action.hadoop.SqoopActionExecutor,
+      org.apache.oozie.action.hadoop.DistcpActionExecutor
+    </value>
+    <description>
+      List of ActionExecutors extension classes (separated by commas). Only action types with associated executors can
+      be used in workflows. This property is a convenience property to add extensions to the built in executors without
+      having to include all the built in ones.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.SchemaService.wf.ext.schemas</name>
+    <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd,hive-action-0.3.xsd</value>
+    <description>
+      Schemas for additional action types. IMPORTANT: if there are no schemas, leave a 1-space string; the service
+      trims the value, and if it is empty Configuration assumes it is NULL.
+    </description>
+  </property>
+  <property>
+    <name>oozie.service.JPAService.create.db.schema</name>
+    <value>false</value>
+    <description>
+      Creates Oozie DB.
+
+      If set to true, it creates the DB schema if it does not exist; if the DB schema exists, this is a NOP.
+      If set to false, it does not create the DB schema; if the DB schema does not exist, startup fails.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.JPAService.jdbc.driver</name>
+    <value>org.apache.derby.jdbc.EmbeddedDriver</value>
+    <description>
+      JDBC driver class.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.JPAService.jdbc.url</name>
+    <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
+    <description>
+      JDBC URL.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.JPAService.jdbc.username</name>
+    <value>oozie</value>
+    <description>
+      Database user name to use to connect to the database
+    </description>
+  </property>
+
+  <property require-input = "true">
+    <name>oozie.service.JPAService.jdbc.password</name>
+    <value> </value>
+    <property-type>PASSWORD</property-type>
+    <description>
+      DB user password.
+
+      IMPORTANT: if the password is empty, leave a 1-space string; the service trims the value,
+      and if it is empty Configuration assumes it is NULL.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.service.JPAService.pool.max.active.conn</name>
+    <value>10</value>
+    <description>
+      Max number of connections.
+    </description>
+  </property>
+
+  <property>
+    <name>oozie.services</name>
+    <value>
+      org.apache.oozie.service.SchedulerService,
+      org.apache.oozie.service.InstrumentationService,
+      org.apache.oozie.service.CallableQueueService,
+      org.apache.oozie.service.UUIDService,
+      org.apache.oozie.service.ELService,
+      org.apache.oozie.service.AuthorizationService,
+      org.apache.oozie.service.UserGroupInformationService,
+      org.apache.oozie.service.HadoopAccessorService,
+      org.apache.oozie.service.URIHandlerService,
+      org.apache.oozie.service.MemoryLocksService,
+      org.apache.oozie.service.DagXLogInfoService,
+      org.apache.oozie.service.SchemaService,
+      org.apache.oozie.service.LiteWorkflowAppService,
+      org.apache.oozie.service.JPAService,
+      org.apache.oozie.service.StoreService,
+      org.apache.oozie.service.CoordinatorStoreService,
+      org.apache.oozie.service.SLAStoreService,
+      org.apache.oozie.service.DBLiteWorkflowStoreService,
+      org.apache.oozie.service.CallbackService,
+      org.apache.oozie.service.ActionService,
+      org.apache.oozie.service.ActionCheckerService,
+      org.apache.oozie.service.RecoveryService,
+      org.apache.oozie.service.PurgeService,
+      org.apache.oozie.service.CoordinatorEngineService,
+      org.apache.oozie.service.BundleEngineService,
+      org.apache.oozie.service.DagEngineService,
+      org.apache.oozie.service.CoordMaterializeTriggerService,
+      org.apache.oozie.service.StatusTransitService,
+      org.apache.oozie.service.PauseTransitService,
+      org.apache.oozie.service.GroupsService,
+      org.apache.oozie.service.ProxyUserService
+    </value>
+    <description>List of Oozie services</description>
+  </property>
+  <property>
+    <name>oozie.service.URIHandlerService.uri.handlers</name>
+    <value>org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler</value>
+    <description>
+      Enlist the different uri handlers supported for data availability checks.
+    </description>
+  </property>
+  <property>
+    <name>oozie.services.ext</name>
+    <value>org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService
+    </value>
+    <description>
+      To add/replace services defined in 'oozie.services' with custom implementations.
+      Class names must be separated by commas.
+    </description>
+  </property>
+  <property>
+    <name>oozie.service.coord.push.check.requeue.interval</name>
+    <value>30000</value>
+    <description>
+      Command re-queue interval for push dependencies (in millisecond).
+    </description>
+  </property>
+  <property>
+    <name>oozie.credentials.credentialclasses</name>
+    <value>hcat=org.apache.oozie.action.hadoop.HCatCredentials</value>
+    <description>
+      Credential Class to be used for HCat.
+    </description>
+  </property>
+
+</configuration>
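
The oozie.service.HadoopAccessorService.hadoop.configurations property above maps authorities to Hadoop conf dirs. A rough sketch (not from the commit) of the lookup rule it describes, where exact HOST:PORT entries win and '*' is the wildcard fallback:

    def resolve_conf_dir(prop_value, authority):
        # comma-separated AUTHORITY=HADOOP_CONF_DIR pairs; '*' is the fallback
        mapping = dict(pair.strip().split("=", 1) for pair in prop_value.split(","))
        return mapping.get(authority, mapping.get("*"))

    print(resolve_conf_dir("*=/etc/hadoop/conf", "namenode.example.com:8020"))
    # prints: /etc/hadoop/conf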

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/metainfo.xml
new file mode 100644
index 0000000..ec66213
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/metainfo.xml
@@ -0,0 +1,165 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>OOZIE</name>
+      <displayName>Oozie</displayName>
+      <comment>System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/legal/open-source-faq/"&gt;ExtJS&lt;/a&gt; Library.
+      </comment>
+      <version>4.0.0.2.0</version>
+      <components>
+        <component>
+          <name>OOZIE_SERVER</name>
+          <displayName>Oozie Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/oozie_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>OOZIE_CLIENT</name>
+          <displayName>Oozie Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/oozie_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>oozie-site.xml</fileName>
+              <dictionaryName>oozie-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>oozie-env.sh</fileName>
+              <dictionaryName>oozie-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>oozie-log4j.properties</fileName>
+              <dictionaryName>oozie-log4j</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>oozie</name>
+            </package>
+            <package>
+              <name>oozie-client</name>
+            </package>
+            <package>
+              <name>mysql-connector-java</name>
+            </package>
+          </packages>
+        </osSpecific>
+        
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>extjs-2.2-1</name>
+            </package>
+          </packages>
+        </osSpecific>
+        
+        <osSpecific>
+          <osFamily>ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>extjs</name>
+            </package>
+            <package>
+              <name>libxml2-utils</name>
+            </package>
+          </packages>
+        </osSpecific>
+        
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+      
+      <requiredServices>
+        <service>YARN</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>oozie-site</config-type>
+        <config-type>oozie-env</config-type>
+        <config-type>oozie-log4j</config-type>
+        <config-type>yarn-site</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>
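
Since metainfo.xml is plain XML, the component declarations above can be read with the standard library; an illustrative snippet:

    import xml.etree.ElementTree as ET

    root = ET.parse('metainfo.xml').getroot()
    for component in root.iter('component'):
        print(component.findtext('name'), component.findtext('cardinality'))
    # prints: OOZIE_SERVER 1
    #         OOZIE_CLIENT 1+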

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/files/alert_check_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/files/alert_check_oozie_server.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/files/alert_check_oozie_server.py
new file mode 100644
index 0000000..7bf1255
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/files/alert_check_oozie_server.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import subprocess
+from subprocess import CalledProcessError
+
+RESULT_CODE_OK = 'OK'
+RESULT_CODE_CRITICAL = 'CRITICAL'
+RESULT_CODE_UNKNOWN = 'UNKNOWN'
+
+OOZIE_URL_KEY = '{{oozie-site/oozie.base.url}}'
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (OOZIE_URL_KEY,)  # trailing comma makes this a one-element tuple, as the docstring promises
+  
+
+def execute(parameters=None, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  parameters (dictionary): a mapping of parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  if parameters is None:
+    return (RESULT_CODE_UNKNOWN, ['There were no parameters supplied to the script.'])
+
+  oozie_url = None
+  if OOZIE_URL_KEY in parameters:
+    oozie_url = parameters[OOZIE_URL_KEY]
+
+  if oozie_url is None:
+    return (RESULT_CODE_UNKNOWN, ['The Oozie URL is a required parameter.'])
+
+  try:
+    # oozie admin -oozie http://server:11000/oozie -status
+    oozie_process = subprocess.Popen(['oozie', 'admin', '-oozie',
+      oozie_url, '-status'], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
+
+    oozie_output, oozie_error = oozie_process.communicate()
+    oozie_return_code = oozie_process.returncode
+
+    if oozie_return_code == 0:
+      # strip trailing newlines
+      oozie_output = str(oozie_output).strip('\n')
+      return (RESULT_CODE_OK, [oozie_output])
+    else:
+      oozie_error = str(oozie_error).strip('\n')
+      return (RESULT_CODE_CRITICAL, [oozie_error])
+
+  except CalledProcessError, cpe:
+    return (RESULT_CODE_CRITICAL, [str(cpe)])
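
A quick usage sketch for the module above: this is roughly how the alert framework would drive it, with an illustrative parameters dictionary keyed by the token from get_tokens():

    parameters = {'{{oozie-site/oozie.base.url}}': 'http://oozie-host:11000/oozie'}
    result_code, label = execute(parameters=parameters, host_name='oozie-host')
    print(result_code, label)
    # e.g. ('OK', ['System mode: NORMAL']) on a healthy host with the oozie CLI installed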

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/files/oozieSmoke2.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/files/oozieSmoke2.sh b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/files/oozieSmoke2.sh
new file mode 100644
index 0000000..30d878c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/files/oozieSmoke2.sh
@@ -0,0 +1,114 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+os_family=$1
+shift
+
+function getValueFromField {
+  xmllint $1 | grep "<name>$2</name>" -C 2 | grep '<value>' | cut -d ">" -f2 | cut -d "<" -f1
+  return $?
+}
+
+function checkOozieJobStatus {
+  local job_id=$1
+  local num_of_tries=$2
+  #default num_of_tries to 10 if not present
+  num_of_tries=${num_of_tries:-10}
+  local i=0
+  local rc=1
+  local cmd="source ${oozie_conf_dir}/oozie-env.sh ; ${oozie_bin_dir}/oozie job -oozie ${OOZIE_SERVER} -info $job_id"
+  sudo su ${smoke_test_user} -s /bin/bash - -c "$cmd"
+  while [ $i -lt $num_of_tries ] ; do
+    cmd_output=`sudo su ${smoke_test_user} -s /bin/bash - -c "$cmd"`
+    (IFS='';echo $cmd_output)
+    act_status=$(IFS='';echo $cmd_output | grep ^Status | cut -d':' -f2 | sed 's| ||g')
+    echo "workflow_status=$act_status"
+    if [ "RUNNING" == "$act_status" ]; then
+      #increment the counter and get the status again after waiting for 15 secs
+      sleep 15
+      (( i++ ))
+      elif [ "SUCCEEDED" == "$act_status" ]; then
+        rc=0;
+        break;
+      else
+        rc=1
+        break;
+      fi
+    done
+    return $rc
+}
+
+export oozie_conf_dir=$1
+export oozie_bin_dir=$2
+export hadoop_conf_dir=$3
+export hadoop_bin_dir=$4
+export smoke_test_user=$5
+export security_enabled=$6
+export smoke_user_keytab=$7
+export kinit_path_local=$8
+
+export OOZIE_EXIT_CODE=0
+export JOBTRACKER=`getValueFromField ${hadoop_conf_dir}/yarn-site.xml yarn.resourcemanager.address`
+export NAMENODE=`getValueFromField ${hadoop_conf_dir}/core-site.xml fs.defaultFS`
+export OOZIE_SERVER=`getValueFromField ${oozie_conf_dir}/oozie-site.xml oozie.base.url | tr '[:upper:]' '[:lower:]'`
+
+if [ "$os_family" == "ubuntu" ] ; then
+  LIST_PACKAGE_FILES_CMD='dpkg-query -L'
+else
+  LIST_PACKAGE_FILES_CMD='rpm -ql'
+fi
+  
+
+export OOZIE_EXAMPLES_DIR=`$LIST_PACKAGE_FILES_CMD oozie-client | grep 'oozie-examples.tar.gz$' | xargs dirname`
+if [[ -z "$OOZIE_EXAMPLES_DIR" ]] ; then
+  export OOZIE_EXAMPLES_DIR='/usr/hdp/current/oozie-client/doc/'
+fi
+cd $OOZIE_EXAMPLES_DIR
+
+sudo tar -zxf oozie-examples.tar.gz
+sudo chmod -R o+rx examples
+
+sudo sed -i "s|nameNode=hdfs://localhost:8020|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
+sudo sed -i "s|nameNode=hdfs://localhost:9000|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
+sudo sed -i "s|jobTracker=localhost:8021|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
+sudo sed -i "s|jobTracker=localhost:9001|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
+sudo sed -i "s|jobTracker=localhost:8032|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
+sudo sed -i "s|oozie.wf.application.path=hdfs://localhost:9000|oozie.wf.application.path=$NAMENODE|g" examples/apps/map-reduce/job.properties
+
+if [[ $security_enabled == "True" ]]; then
+  kinitcmd="${kinit_path_local} -kt ${smoke_user_keytab} ${smoke_test_user}; "
+else 
+  kinitcmd=""
+fi
+
+sudo su ${smoke_test_user} -s /bin/bash - -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -rm -r examples"
+sudo su ${smoke_test_user} -s /bin/bash - -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -rm -r input-data"
+sudo su ${smoke_test_user} -s /bin/bash - -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples examples"
+sudo su ${smoke_test_user} -s /bin/bash - -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples/input-data input-data"
+
+cmd="${kinitcmd}source ${oozie_conf_dir}/oozie-env.sh ; ${oozie_bin_dir}/oozie -Doozie.auth.token.cache=false job -oozie $OOZIE_SERVER -config $OOZIE_EXAMPLES_DIR/examples/apps/map-reduce/job.properties  -run"
+echo $cmd
+job_info=`sudo su ${smoke_test_user} -s /bin/bash - -c "$cmd" | grep "job:"`
+job_id="`echo $job_info | cut -d':' -f2`"
+checkOozieJobStatus "$job_id" 15
+OOZIE_EXIT_CODE="$?"
+exit $OOZIE_EXIT_CODE
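
For comparison, a hedged Python rendering (not part of the commit) of the checkOozieJobStatus polling loop above: keep polling `oozie job -info` while the status is RUNNING, succeed on SUCCEEDED, and fail on anything else:

    import subprocess, time

    def check_oozie_job_status(oozie_server, job_id, num_of_tries=10):
        for _ in range(num_of_tries):
            out = subprocess.check_output(
                ['oozie', 'job', '-oozie', oozie_server, '-info', job_id])
            status = next((line.split(':', 1)[1].strip()
                           for line in out.decode().splitlines()
                           if line.startswith('Status')), None)
            if status == 'RUNNING':
                time.sleep(15)   # still running: wait and poll again
            else:
                return status == 'SUCCEEDED'
        return False             # still RUNNING after all tries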

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/files/wrap_ooziedb.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/files/wrap_ooziedb.sh b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/files/wrap_ooziedb.sh
new file mode 100644
index 0000000..36576b5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/files/wrap_ooziedb.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+OUT=`cd /var/tmp/oozie && /usr/lib/oozie/bin/ooziedb.sh "$@" 2>&1`
+EC=$?
+echo $OUT
+GRVAR=`echo ${OUT} | grep -o "java.lang.Exception: DB schema exists"`
+if [ ${EC} -ne 0 ] && [ -n "$GRVAR" ]
+then
+  exit 0
+else
+  exit $EC
+fi  
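
The wrapper above makes ooziedb.sh idempotent: a non-zero exit caused only by an already-existing schema is treated as success. The same idea as a minimal Python sketch (illustrative, not from the commit):

    import subprocess, sys

    proc = subprocess.Popen(['/usr/lib/oozie/bin/ooziedb.sh', 'create', '-run'],
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                            cwd='/var/tmp/oozie')
    out, _ = proc.communicate()
    print(out)
    # a failure caused only by an already-initialized schema is not a real failure
    ok = proc.returncode == 0 or b'java.lang.Exception: DB schema exists' in out
    sys.exit(0 if ok else proc.returncode)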

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
new file mode 100644
index 0000000..a760ab2
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
@@ -0,0 +1,206 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+
+from resource_management import *
+
+def oozie(is_server=False): # TODO: see if we can remove this parameter
+  import params
+
+  if is_server:
+    params.HdfsDirectory(params.oozie_hdfs_user_dir,
+                         action="create",
+                         owner=params.oozie_user,
+                         mode=params.oozie_hdfs_user_mode
+    )
+  Directory(params.conf_dir,
+             recursive = True,
+             owner = params.oozie_user,
+             group = params.user_group
+  )
+  XmlConfig("oozie-site.xml",
+    conf_dir = params.conf_dir,
+    configurations = params.config['configurations']['oozie-site'],
+    configuration_attributes=params.config['configuration_attributes']['oozie-site'],
+    owner = params.oozie_user,
+    group = params.user_group,
+    mode = 0664
+  )
+  File(format("{conf_dir}/oozie-env.sh"),
+    owner=params.oozie_user,
+    content=InlineTemplate(params.oozie_env_sh_template)
+  )
+
+  if (params.log4j_props != None):
+    File(format("{params.conf_dir}/oozie-log4j.properties"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.oozie_user,
+      content=params.log4j_props
+    )
+  elif (os.path.exists(format("{params.conf_dir}/oozie-log4j.properties"))):
+    File(format("{params.conf_dir}/oozie-log4j.properties"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.oozie_user
+    )
+
+  if params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0:
+    File(format("{params.conf_dir}/adminusers.txt"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.oozie_user,
+      content=Template('adminusers.txt.j2', oozie_user=params.oozie_user)
+    )
+  else:
+    File ( format("{params.conf_dir}/adminusers.txt"),
+           owner = params.oozie_user,
+           group = params.user_group
+    )
+
+  environment = {
+    "no_proxy": format("{ambari_server_hostname}")
+  }
+
+  if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
+     params.jdbc_driver_name == "org.postgresql.Driver" or \
+     params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+    Execute(format("/bin/sh -c 'cd /usr/lib/ambari-agent/ &&\
+    curl -kf -x \"\" \
+    --retry 5 {jdk_location}{check_db_connection_jar_name}\
+     -o {check_db_connection_jar_name}'"),
+      not_if  = format("[ -f {check_db_connection_jar} ]"),
+      environment=environment
+    )
+  pass
+
+  oozie_ownership()
+  
+  if is_server:      
+    oozie_server_specific()
+  
+def oozie_ownership():
+  import params
+  
+  File ( format("{conf_dir}/hadoop-config.xml"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  File ( format("{conf_dir}/oozie-default.xml"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  Directory ( format("{conf_dir}/action-conf"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  File ( format("{conf_dir}/action-conf/hive.xml"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+  
+def oozie_server_specific():
+  import params
+  
+  # format() is needed here so that {pid_file} is actually substituted
+  File(params.pid_file,
+    action="delete",
+    not_if=format("ls {pid_file} >/dev/null 2>&1 && !(ps `cat {pid_file}` >/dev/null 2>&1)")
+  )
+  
+  oozie_server_directories = [format("{oozie_home}/{oozie_tmp_dir}"), params.oozie_pid_dir, params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir, params.oozie_lib_dir, params.oozie_webapps_dir, params.oozie_webapps_conf_dir, params.oozie_server_dir]
+  Directory( oozie_server_directories,
+    owner = params.oozie_user,
+    group = params.user_group,
+    mode = 0755,
+    recursive = True,
+    recursive_permission=True
+  )
+  
+  Directory(params.oozie_libext_dir,
+            recursive=True,
+  )
+  
+  configure_cmds = []  
+  configure_cmds.append(('tar','-xvf',format('{oozie_home}/oozie-sharelib.tar.gz'),'-C',params.oozie_home))
+  configure_cmds.append(('cp', params.ext_js_path, params.oozie_libext_dir))
+  configure_cmds.append(('chown', format('{oozie_user}:{user_group}'), format('{oozie_libext_dir}/{ext_js_file}')))
+  configure_cmds.append(('chown', '-RL', format('{oozie_user}:{user_group}'), params.oozie_webapps_conf_dir))
+  
+  no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
+  Execute( configure_cmds,
+    not_if  = no_op_test,
+    sudo = True,
+  )
+
+  if params.jdbc_driver_name=="com.mysql.jdbc.Driver" or params.jdbc_driver_name=="oracle.jdbc.driver.OracleDriver":
+    Execute(('cp', params.jdbc_driver_jar, params.oozie_libext_dir),
+      not_if  = no_op_test,
+      sudo = True,
+    )
+  #falcon el extension
+  if params.has_falcon_host:
+    Execute(format('sudo cp {falcon_home}/oozie/ext/falcon-oozie-el-extension-*.jar {oozie_libext_dir}'),
+      not_if  = no_op_test,
+    )
+    Execute(format('sudo chown {oozie_user}:{user_group} {oozie_libext_dir}/falcon-oozie-el-extension-*.jar'),
+      not_if  = no_op_test,
+    )
+  if params.lzo_enabled:
+    Package(params.lzo_packages_for_current_host)
+    Execute(format('sudo cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),
+      not_if  = no_op_test,
+    )
+
+  Execute(format("cd {oozie_tmp_dir} && {oozie_setup_sh} prepare-war"),
+    user = params.oozie_user,
+    not_if  = no_op_test
+  )
+
+  if params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0:
+    # Create hive-site and tez-site configs for oozie
+    Directory(params.hive_conf_dir,
+        recursive = True,
+        owner = params.oozie_user,
+        group = params.user_group
+    )
+    if 'hive-site' in params.config['configurations']:
+      XmlConfig("hive-site.xml",
+        conf_dir=params.hive_conf_dir,
+        configurations=params.config['configurations']['hive-site'],
+        configuration_attributes=params.config['configuration_attributes']['hive-site'],
+        owner=params.oozie_user,
+        group=params.user_group,
+        mode=0644
+      )
+    if 'tez-site' in params.config['configurations']:
+      XmlConfig( "tez-site.xml",
+        conf_dir = params.hive_conf_dir,
+        configurations = params.config['configurations']['tez-site'],
+        configuration_attributes=params.config['configuration_attributes']['tez-site'],
+        owner = params.oozie_user,
+        group = params.user_group,
+        mode = 0664
+      )
+  pass
+  

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_client.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_client.py
new file mode 100644
index 0000000..f77a8db
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_client.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from oozie import oozie
+from oozie_service import oozie_service
+
+         
+class OozieClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    oozie(is_server=False)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+    
+if __name__ == "__main__":
+  OozieClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
new file mode 100644
index 0000000..f07e36d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from oozie import oozie
+from oozie_service import oozie_service
+
+         
+class OozieServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    oozie(is_server=True)
+    
+  def start(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+    #TODO remove this when config command will be implemented
+    self.configure(env)
+    oozie_service(action='start')
+    
+  def stop(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+    oozie_service(action='stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.pid_file)
+    
+if __name__ == "__main__":
+  OozieServer().execute()
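
The Script subclasses above rely on resource_management dispatching the requested command to a same-named method on the class; the exact argv contract is Ambari-internal, so the following is only a stripped-down sketch of the dispatch idea:

    import sys

    class MiniScript(object):
        def start(self):  print("starting")
        def stop(self):   print("stopping")
        def status(self): print("checking status")

        def execute(self):
            # the requested command name is dispatched to the same-named method
            command = sys.argv[1] if len(sys.argv) > 1 else 'status'
            getattr(self, command)()

    if __name__ == "__main__":
        MiniScript().execute()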

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_service.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_service.py
new file mode 100644
index 0000000..a1d7bad
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_service.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+from resource_management import *
+
+def oozie_service(action = 'start'): # 'start' or 'stop'
+  import params
+
+  kinit_if_needed = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal};") if params.security_enabled else ""
+  no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
+  
+  if action == 'start':
+    start_cmd = format("cd {oozie_tmp_dir} && {oozie_home}/bin/oozie-start.sh")
+    
+    if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or \
+       params.jdbc_driver_name == "org.postgresql.Driver" or \
+       params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+      db_connection_check_command = format("{java_home}/bin/java -cp {check_db_connection_jar}:{jdbc_driver_jar} org.apache.ambari.server.DBConnectionVerification '{oozie_jdbc_connection_url}' {oozie_metastore_user_name} {oozie_metastore_user_passwd!p} {jdbc_driver_name}")
+    else:
+      db_connection_check_command = None
+      
+    cmd1 =  format("cd {oozie_tmp_dir} && {oozie_home}/bin/ooziedb.sh create -sqlfile oozie.sql -run")
+    cmd2 =  format("{kinit_if_needed} {put_shared_lib_to_hdfs_cmd} ; hadoop --config {hadoop_conf_dir} dfs -chmod -R 755 {oozie_hdfs_user_dir}/share")
+
+    if not os.path.isfile(params.jdbc_driver_jar) and params.jdbc_driver_name == "org.postgresql.Driver":
+      print format("ERROR: jdbc file {jdbc_driver_jar} is unavailable. Please, follow next steps:\n" \
+        "1) Download postgresql-9.0-801.jdbc4.jar.\n2) Create needed directory: mkdir -p {oozie_home}/libserver/\n" \
+        "3) Copy postgresql-9.0-801.jdbc4.jar to newly created dir: cp /path/to/jdbc/postgresql-9.0-801.jdbc4.jar " \
+        "{oozie_home}/libserver/\n4) Copy postgresql-9.0-801.jdbc4.jar to libext: cp " \
+        "/path/to/jdbc/postgresql-9.0-801.jdbc4.jar {oozie_home}/libext/\n")
+      exit(1)
+
+    if db_connection_check_command:
+      Execute( db_connection_check_command, tries=5, try_sleep=10)
+                  
+    Execute( cmd1,
+      user = params.oozie_user,
+      not_if  = no_op_test,
+      ignore_failures = True
+    ) 
+    
+    Execute( cmd2,
+      user = params.oozie_user,
+      not_if = format("{kinit_if_needed} hadoop --config {hadoop_conf_dir} dfs -ls /user/oozie/share | awk 'BEGIN {{count=0;}} /share/ {{count++}} END {{if (count > 0) {{exit 0}} else {{exit 1}}}}'"),
+      path = params.execute_path
+    )
+    
+    Execute( start_cmd,
+      user = params.oozie_user,
+      not_if  = no_op_test,
+    )
+  elif action == 'stop':
+    stop_cmd  = format("cd {oozie_tmp_dir} && {oozie_home}/bin/oozie-stop.sh")
+    Execute(stop_cmd,
+      only_if  = no_op_test,
+      user = params.oozie_user
+    )
+    File(params.pid_file,
+         action = "delete",
+    )
+
+  
+  
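
A note on the guards above, for illustration: Execute's not_if/only_if parameters make these actions idempotent, e.g. cmd1 (schema creation) and start_cmd are skipped when no_op_test finds a live pid. A bare-bones sketch of that semantics with plain subprocess calls (not the real resource_management Execute):

    import subprocess

    def run_guarded(command, not_if=None, only_if=None):
        # Skip when the not_if check succeeds (exit code 0) ...
        if not_if is not None and subprocess.call(not_if, shell=True) == 0:
            return
        # ... or when the only_if check fails.
        if only_if is not None and subprocess.call(only_if, shell=True) != 0:
            return
        subprocess.check_call(command, shell=True)

    # e.g. run the (hypothetical) schema creation only while Oozie is down:
    # run_guarded("ooziedb.sh create -sqlfile oozie.sql -run",
    #             not_if="ls /var/run/oozie/oozie.pid >/dev/null 2>&1")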

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params.py
new file mode 100644
index 0000000..b5f2ea7
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params.py
@@ -0,0 +1,170 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management import *
+import status_params
+import itertools
+import os
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+
+#hadoop params
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+  hadoop_bin_dir = "/usr/hdp/current/hadoop-client/bin"
+  hadoop_lib_home = "/usr/hdp/current/hadoop-client/lib"
+  oozie_lib_dir = "/usr/hdp/current/oozie-client/"
+  oozie_setup_sh = "/usr/hdp/current/oozie-client/bin/oozie-setup.sh"
+  oozie_webapps_dir = "/usr/hdp/current/oozie-client/oozie-server/webapps"
+  oozie_webapps_conf_dir = "/usr/hdp/current/oozie-client/oozie-server/conf"
+  oozie_libext_dir = "/usr/hdp/current/oozie-client/libext"
+  oozie_server_dir = "/usr/hdp/current/oozie-client/oozie-server"
+  oozie_shared_lib = "/usr/hdp/current/oozie-client/share"
+  oozie_home = "/usr/hdp/current/oozie-client"
+  oozie_bin_dir = "/usr/hdp/current/oozie-client/bin"
+  falcon_home = '/usr/hdp/current/falcon-client'
+else:
+  hadoop_bin_dir = "/usr/bin"
+  hadoop_lib_home = "/usr/lib/hadoop/lib"
+  oozie_lib_dir = "/var/lib/oozie/"
+  oozie_setup_sh = "/usr/lib/oozie/bin/oozie-setup.sh"
+  oozie_webapps_dir = "/var/lib/oozie/oozie-server/webapps/"
+  oozie_webapps_conf_dir = "/var/lib/oozie/oozie-server/conf"
+  oozie_libext_dir = "/usr/lib/oozie/libext"
+  oozie_server_dir = "/var/lib/oozie/oozie-server"
+  oozie_shared_lib = "/usr/lib/oozie/share"
+  oozie_home = "/usr/lib/oozie"
+  oozie_bin_dir = "/usr/bin"
+  falcon_home = '/usr/lib/falcon'
+
+execute_path = oozie_bin_dir + os.pathsep + hadoop_bin_dir
+
+hadoop_conf_dir = "/etc/hadoop/conf"
+conf_dir = "/etc/oozie/conf"
+hive_conf_dir = "/etc/oozie/conf/action-conf/hive"
+oozie_user = config['configurations']['oozie-env']['oozie_user']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+user_group = config['configurations']['cluster-env']['user_group']
+jdk_location = config['hostLevelParams']['jdk_location']
+check_db_connection_jar_name = "DBConnectionVerification.jar"
+check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
+oozie_tmp_dir = "/var/tmp/oozie"
+oozie_hdfs_user_dir = format("/user/{oozie_user}")
+oozie_pid_dir = status_params.oozie_pid_dir
+pid_file = status_params.pid_file
+hadoop_jar_location = "/usr/lib/hadoop/"
+# for HDP1 it's "/usr/share/HDP-oozie/ext.zip"
+ext_js_file = "ext-2.2.zip"
+ext_js_path = format("/usr/share/HDP-oozie/{ext_js_file}")
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+oozie_heapsize = config['configurations']['oozie-env']['oozie_heapsize']
+oozie_permsize = config['configurations']['oozie-env']['oozie_permsize']
+
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file']
+oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
+smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+oozie_keytab = config['configurations']['oozie-env']['oozie_keytab']
+oozie_env_sh_template = config['configurations']['oozie-env']['content']
+
+oracle_driver_jar_name = "ojdbc6.jar"
+
+java_home = config['hostLevelParams']['java_home']
+oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username']
+oozie_metastore_user_passwd = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.password","")
+oozie_jdbc_connection_url = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.url", "")
+oozie_log_dir = config['configurations']['oozie-env']['oozie_log_dir']
+oozie_data_dir = config['configurations']['oozie-env']['oozie_data_dir']
+oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
+oozie_server_admin_port = config['configurations']['oozie-env']['oozie_admin_port']
+fs_root = config['configurations']['core-site']['fs.defaultFS']
+
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.0') >= 0 and compare_versions(hdp_stack_version, '2.2') < 0:
+  put_shared_lib_to_hdfs_cmd = format("hadoop --config {hadoop_conf_dir} dfs -put {oozie_shared_lib} {oozie_hdfs_user_dir}")
+# for HDP 2.2 and newer stacks
+else:
+  put_shared_lib_to_hdfs_cmd = format("{oozie_setup_sh} sharelib create -fs {fs_root} -locallib {oozie_shared_lib}")
+  
+jdbc_driver_name = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.driver", "")
+
+if jdbc_driver_name == "com.mysql.jdbc.Driver":
+  jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
+elif jdbc_driver_name == "org.postgresql.Driver":
+  jdbc_driver_jar = format("{oozie_home}/libserver/postgresql-9.0-801.jdbc4.jar")
+elif jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+  jdbc_driver_jar = "/usr/share/java/ojdbc6.jar"
+else:
+  jdbc_driver_jar = ""
+
+hostname = config["hostname"]
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
+has_falcon_host = len(falcon_host) > 0
+
+#oozie-log4j.properties
+if 'oozie-log4j' in config['configurations'] and 'content' in config['configurations']['oozie-log4j']:
+  log4j_props = config['configurations']['oozie-log4j']['content']
+else:
+  log4j_props = None
+
+oozie_hdfs_user_mode = 0775
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+import functools
+#create partial functions with common arguments for every HdfsDirectory call
+#to create hdfs directory we need to call params.HdfsDirectory in code
+HdfsDirectory = functools.partial(
+  HdfsDirectory,
+  conf_dir=hadoop_conf_dir,
+  hdfs_user=hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  bin_dir = hadoop_bin_dir
+)
+
+#LZO support
+
+io_compression_codecs = config['configurations']['core-site']['io.compression.codecs']
+lzo_enabled = "com.hadoop.compression.lzo" in io_compression_codecs
+# package-name version suffixes used by HDP 2.2+ packaging
+underscored_version = stack_version_unformatted.replace('.', '_')
+dashed_version = stack_version_unformatted.replace('.', '-')
+lzo_packages_to_family = {
+  "any": ["hadoop-lzo"],
+  "redhat": ["lzo", "hadoop-lzo-native"],
+  "suse": ["lzo", "hadoop-lzo-native"],
+  "ubuntu": ["liblzo2-2"]
+}
+
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+  lzo_packages_to_family["redhat"] += [format("hadooplzo_{underscorred_version}_*")]
+  lzo_packages_to_family["suse"] += [format("hadooplzo_{underscorred_version}_*")]
+  lzo_packages_to_family["ubuntu"] += [format("hadooplzo_{dashed_version}_*")]
+
+lzo_packages_for_current_host = lzo_packages_to_family['any'] + lzo_packages_to_family[System.get_instance().os_family]
+all_lzo_packages = set(itertools.chain(*lzo_packages_to_family.values()))
+
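
For illustration: params.py repeatedly branches on compare_versions(hdp_stack_version, '2.2') to choose the /usr/hdp/current/... layout over the legacy /usr/lib/... one. A rough stand-in for that comparison, assuming plain dotted numeric version strings (the real helper lives in resource_management.libraries.functions.version):

    def compare_versions_sketch(v1, v2):
        """Return -1, 0 or 1 after comparing dotted versions numerically."""
        a = [int(p) for p in v1.split('.')]
        b = [int(p) for p in v2.split('.')]
        # Pad the shorter list so '2.2' and '2.2.0' compare as equal.
        length = max(len(a), len(b))
        a += [0] * (length - len(a))
        b += [0] * (length - len(b))
        return (a > b) - (a < b)

    # compare_versions_sketch('2.0.6', '2.2') == -1  -> legacy /usr/lib paths
    # compare_versions_sketch('2.2.0', '2.2') ==  0  -> /usr/hdp/current paths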

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/service_check.py
new file mode 100644
index 0000000..40f8b8d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/service_check.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+class OozieServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    
+    # on HDP1 this file is different
+    smoke_test_file_name = 'oozieSmoke2.sh'
+
+    oozie_smoke_shell_file( smoke_test_file_name)
+  
+def oozie_smoke_shell_file(file_name):
+  import params
+
+  File( format("{tmp_dir}/{file_name}"),
+    content = StaticFile(file_name),
+    mode = 0755
+  )
+  
+  os_family = System.get_instance().os_family
+  
+  if params.security_enabled:
+    sh_cmd = format("{tmp_dir}/{file_name} {os_family} {conf_dir} {oozie_bin_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {security_enabled} {smokeuser_keytab} {kinit_path_local}")
+  else:
+    sh_cmd = format("{tmp_dir}/{file_name} {os_family} {conf_dir} {oozie_bin_dir} {hadoop_conf_dir} {hadoop_bin_dir} {smokeuser} {security_enabled}")
+
+  Execute( format("{tmp_dir}/{file_name}"),
+    command   = sh_cmd,
+    path      = params.execute_path,
+    tries     = 3,
+    try_sleep = 5,
+    logoutput = True
+  )
+    
+if __name__ == "__main__":
+  OozieServiceCheck().execute()
+  
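
The smoke test above runs with tries = 3 and try_sleep = 5, i.e. up to three attempts with a five-second pause after each failure. As a sketch of that retry behavior (not the actual Execute implementation):

    import subprocess
    import time

    def execute_with_retries(command, tries=3, try_sleep=5):
        for attempt in range(1, tries + 1):
            if subprocess.call(command, shell=True) == 0:
                return  # success
            if attempt < tries:
                time.sleep(try_sleep)  # back off before the next attempt
        raise Exception("Command failed after %d tries: %s" % (tries, command))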

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/status_params.py
new file mode 100644
index 0000000..a665449
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/status_params.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+oozie_pid_dir = config['configurations']['oozie-env']['oozie_pid_dir']
+pid_file = format("{oozie_pid_dir}/oozie.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/templates/adminusers.txt.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/templates/adminusers.txt.j2 b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/templates/adminusers.txt.j2
new file mode 100644
index 0000000..9feae39
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/templates/adminusers.txt.j2
@@ -0,0 +1,24 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Users should be set using following rules:
+#
+#     One user name per line
+#     Empty lines and lines starting with '#' are ignored
+
+{{oozie_user}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/templates/oozie-log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/templates/oozie-log4j.properties.j2 b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/templates/oozie-log4j.properties.j2
new file mode 100644
index 0000000..8c9f25e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/templates/oozie-log4j.properties.j2
@@ -0,0 +1,92 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
+# XLogService sets its value to '${oozie.home}/logs'
+
+log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH
+log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
+log4j.appender.oozie.Append=true
+log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - SERVER[${oozie.instance.id}] %m%n
+
+log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
+log4j.appender.oozieops.Append=true
+log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
+log4j.appender.oozieinstrumentation.Append=true
+log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
+log4j.appender.oozieaudit.Append=true
+log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
+log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
+log4j.appender.openjpa.Append=true
+log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
+log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.logger.openjpa=INFO, openjpa
+log4j.logger.oozieops=INFO, oozieops
+log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
+log4j.logger.oozieaudit=ALL, oozieaudit
+log4j.logger.org.apache.oozie=INFO, oozie
+log4j.logger.org.apache.hadoop=WARN, oozie
+log4j.logger.org.mortbay=WARN, oozie
+log4j.logger.org.hsqldb=WARN, oozie
+log4j.logger.org.apache.hadoop.security.authentication.server=INFO, oozie

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/alerts.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/alerts.json
deleted file mode 100644
index 3762492..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/alerts.json
+++ /dev/null
@@ -1,60 +0,0 @@
-{
-  "HIVE": {
-    "service": [],
-    "HIVE_METASTORE": [
-      {
-        "name": "hive_metastore_process",
-        "label": "Hive Metastore Process",
-        "description": "This host-level alert is triggered if the Hive Metastore process cannot be determined to be up and listening on the network.",
-        "interval": 1,
-        "scope": "ANY",
-        "source": {
-          "type": "PORT",
-          "uri": "{{hive-site/hive.metastore.uris}}",
-          "default_port": 9083,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      }
-    ],
-    "HIVE_SERVER": [
-      {
-        "name": "hive_server_process",
-        "label": "HiveServer2 Process",
-        "description": "This host-level alert is triggered if the HiveServer cannot be determined to be up and responding to client requests.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDP/2.0.6/services/HIVE/package/files/alert_hive_thrift_port.py"
-        }
-      }
-    ],
-    "WEBHCAT_SERVER": [
-      {
-        "name": "hive_webhcat_server_status",
-        "label": "WebHCat Server Status",
-        "description": "This host-level alert is triggered if the templeton server status is not healthy.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDP/2.0.6/services/HIVE/package/files/alert_webhcat_server.py"
-        }
-      }    
-    ]
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hcat-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hcat-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hcat-env.xml
deleted file mode 100644
index 91b402b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hcat-env.xml
+++ /dev/null
@@ -1,57 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <!-- hcat-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the jinja template for hcat-env.sh file</description>
-    <value>
-      # Licensed to the Apache Software Foundation (ASF) under one
-      # or more contributor license agreements. See the NOTICE file
-      # distributed with this work for additional information
-      # regarding copyright ownership. The ASF licenses this file
-      # to you under the Apache License, Version 2.0 (the
-      # "License"); you may not use this file except in compliance
-      # with the License. You may obtain a copy of the License at
-      #
-      # http://www.apache.org/licenses/LICENSE-2.0
-      #
-      # Unless required by applicable law or agreed to in writing, software
-      # distributed under the License is distributed on an "AS IS" BASIS,
-      # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-      # See the License for the specific language governing permissions and
-      # limitations under the License.
-
-      JAVA_HOME={{java64_home}}
-      HCAT_PID_DIR={{hcat_pid_dir}}/
-      HCAT_LOG_DIR={{hcat_log_dir}}/
-      HCAT_CONF_DIR={{hcat_conf_dir}}
-      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-      #DBROOT is the path where the connector jars are downloaded
-      DBROOT={{hcat_dbroot}}
-      USER={{hcat_user}}
-      METASTORE_PORT={{hive_metastore_port}}
-    </value>
-  </property>
-  
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-env.xml
deleted file mode 100644
index 52cfa10..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-env.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hive_database_type</name>
-    <value>mysql</value>
-    <description>Default HIVE DB type.</description>
-  </property>
-  <property>
-    <name>hive_database</name>
-    <value>New MySQL Database</value>
-    <description>
-      Property that determines whether the HIVE DB is managed by Ambari.
-    </description>
-  </property>
-  <property>
-    <name>hive_ambari_database</name>
-    <value>MySQL</value>
-    <description>Database type.</description>
-  </property>
-  <property>
-    <name>hive_database_name</name>
-    <value>hive</value>
-    <description>Database name.</description>
-  </property>
-  <property>
-    <name>hive_dbroot</name>
-    <value>/usr/lib/hive/lib/</value>
-    <description>Hive DB Directory.</description>
-  </property>
-  <property>
-    <name>hive_log_dir</name>
-    <value>/var/log/hive</value>
-    <description>Directory for Hive Log files.</description>
-  </property>
-  <property>
-    <name>hive_pid_dir</name>
-    <value>/var/run/hive</value>
-    <description>Hive PID Dir.</description>
-  </property>
-  <property>
-    <name>hive_user</name>
-    <value>hive</value>
-    <property-type>USER</property-type>
-    <description>Hive User.</description>
-  </property>
-
-  <!--HCAT-->
-
-  <property>
-    <name>hcat_log_dir</name>
-    <value>/var/log/webhcat</value>
-    <description>WebHCat Log Dir.</description>
-  </property>
-  <property>
-    <name>hcat_pid_dir</name>
-    <value>/var/run/webhcat</value>
-    <description>WebHCat Pid Dir.</description>
-  </property>
-  <property>
-    <name>hcat_user</name>
-    <value>hcat</value>
-    <property-type>USER</property-type>
-    <description>HCat User.</description>
-  </property>
-  <property>
-    <name>webhcat_user</name>
-    <value>hcat</value>
-    <property-type>USER</property-type>
-    <description>WebHCat User.</description>
-  </property>
-  
-  <!-- hive-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the jinja template for hive-env.sh file</description>
-    <value>
- if [ "$SERVICE" = "cli" ]; then
-   if [ -z "$DEBUG" ]; then
-     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
-   else
-     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-   fi
- fi
-
-# The heap size of the jvm stared by hive shell script can be controlled via:
-
-export HADOOP_HEAPSIZE="{{hive_heapsize}}"
-export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-
-# Larger heap size may be required when running queries over large number of files or partitions.
-# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-# appropriate for hive server (hwi etc).
-
-
-# Set HADOOP_HOME to point to a specific hadoop install directory
-HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-
-# Hive Configuration Directory can be controlled by:
-export HIVE_CONF_DIR={{hive_config_dir}}
-
-# Folder containing extra ibraries required for hive compilation/execution can be controlled by:
-if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
-  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
-elif [ -d "/usr/lib/hive-hcatalog/" ]; then
-  export HIVE_AUX_JARS_PATH=/usr/lib/hive-hcatalog/share/hcatalog/hive-hcatalog-core-*.jar
-else
-  export HIVE_AUX_JARS_PATH=/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar
-fi
-export METASTORE_PORT={{hive_metastore_port}}
-    </value>
-  </property>
-  
-</configuration>


[24/37] ambari git commit: AMBARI-8745: Common Services: Refactor HDP2.0.6 FLUME, GANGLIA, HBASE services (Jayush Luniya)

Posted by jl...@apache.org.
AMBARI-8745: Common Services: Refactor HDP2.0.6 FLUME, GANGLIA, HBASE services (Jayush Luniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b4cd4cb7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b4cd4cb7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b4cd4cb7

Branch: refs/heads/trunk
Commit: b4cd4cb71ffe406d1d75824ea29880e9f824ae20
Parents: 17b7155
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Wed Dec 17 11:58:43 2014 -0800
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Wed Dec 17 11:58:43 2014 -0800

----------------------------------------------------------------------
 .../common-services/FLUME/1.4.0.2.0/alerts.json |    18 +
 .../1.4.0.2.0/configuration/flume-conf.xml      |    33 +
 .../FLUME/1.4.0.2.0/configuration/flume-env.xml |    79 +
 .../FLUME/1.4.0.2.0/metainfo.xml                |    68 +
 .../FLUME/1.4.0.2.0/metrics.json                |   716 +
 .../package/files/alert_flume_agent_status.py   |    99 +
 .../FLUME/1.4.0.2.0/package/scripts/flume.py    |   206 +
 .../1.4.0.2.0/package/scripts/flume_check.py    |    40 +
 .../1.4.0.2.0/package/scripts/flume_handler.py  |    79 +
 .../FLUME/1.4.0.2.0/package/scripts/params.py   |    80 +
 .../templates/flume-metrics2.properties.j2      |    22 +
 .../1.4.0.2.0/package/templates/flume.conf.j2   |    24 +
 .../package/templates/log4j.properties.j2       |    67 +
 .../common-services/GANGLIA/3.5.0/alerts.json   |   137 +
 .../GANGLIA/3.5.0/configuration/ganglia-env.xml |    77 +
 .../common-services/GANGLIA/3.5.0/metainfo.xml  |   127 +
 .../GANGLIA/3.5.0/package/files/checkGmetad.sh  |    37 +
 .../GANGLIA/3.5.0/package/files/checkGmond.sh   |    62 +
 .../3.5.0/package/files/checkRrdcached.sh       |    34 +
 .../GANGLIA/3.5.0/package/files/gmetad.init     |    73 +
 .../GANGLIA/3.5.0/package/files/gmetadLib.sh    |   204 +
 .../GANGLIA/3.5.0/package/files/gmond.init      |    73 +
 .../GANGLIA/3.5.0/package/files/gmondLib.sh     |   538 +
 .../GANGLIA/3.5.0/package/files/rrdcachedLib.sh |    47 +
 .../GANGLIA/3.5.0/package/files/setupGanglia.sh |   141 +
 .../GANGLIA/3.5.0/package/files/startGmetad.sh  |    68 +
 .../GANGLIA/3.5.0/package/files/startGmond.sh   |    85 +
 .../3.5.0/package/files/startRrdcached.sh       |    79 +
 .../GANGLIA/3.5.0/package/files/stopGmetad.sh   |    43 +
 .../GANGLIA/3.5.0/package/files/stopGmond.sh    |    54 +
 .../3.5.0/package/files/stopRrdcached.sh        |    41 +
 .../3.5.0/package/files/teardownGanglia.sh      |    28 +
 .../GANGLIA/3.5.0/package/scripts/functions.py  |    31 +
 .../GANGLIA/3.5.0/package/scripts/ganglia.py    |    97 +
 .../3.5.0/package/scripts/ganglia_monitor.py    |   243 +
 .../package/scripts/ganglia_monitor_service.py  |    27 +
 .../3.5.0/package/scripts/ganglia_server.py     |   123 +
 .../package/scripts/ganglia_server_service.py   |    27 +
 .../GANGLIA/3.5.0/package/scripts/params.py     |   164 +
 .../3.5.0/package/scripts/status_params.py      |    25 +
 .../3.5.0/package/templates/ganglia.conf.j2     |    34 +
 .../package/templates/gangliaClusters.conf.j2   |    43 +
 .../3.5.0/package/templates/gangliaEnv.sh.j2    |    46 +
 .../3.5.0/package/templates/gangliaLib.sh.j2    |    85 +
 .../GANGLIA/3.5.0/package/templates/rrd.py.j2   |   361 +
 .../HBASE/0.96.0.2.0/alerts.json                |   124 +
 .../0.96.0.2.0/configuration/hbase-env.xml      |   135 +
 .../0.96.0.2.0/configuration/hbase-log4j.xml    |   143 +
 .../0.96.0.2.0/configuration/hbase-policy.xml   |    53 +
 .../0.96.0.2.0/configuration/hbase-site.xml     |   331 +
 .../HBASE/0.96.0.2.0/metainfo.xml               |   144 +
 .../HBASE/0.96.0.2.0/metrics.json               | 13635 +++++++++++++++++
 .../package/files/draining_servers.rb           |   164 +
 .../package/files/hbaseSmokeVerify.sh           |    34 +
 .../0.96.0.2.0/package/scripts/__init__.py      |    19 +
 .../0.96.0.2.0/package/scripts/functions.py     |    40 +
 .../HBASE/0.96.0.2.0/package/scripts/hbase.py   |   158 +
 .../0.96.0.2.0/package/scripts/hbase_client.py  |    43 +
 .../package/scripts/hbase_decommission.py       |    74 +
 .../0.96.0.2.0/package/scripts/hbase_master.py  |    76 +
 .../package/scripts/hbase_regionserver.py       |    78 +
 .../0.96.0.2.0/package/scripts/hbase_service.py |    51 +
 .../HBASE/0.96.0.2.0/package/scripts/params.py  |   147 +
 .../0.96.0.2.0/package/scripts/service_check.py |    79 +
 .../0.96.0.2.0/package/scripts/status_params.py |    26 +
 .../HBASE/0.96.0.2.0/package/scripts/upgrade.py |    49 +
 ...-metrics2-hbase.properties-GANGLIA-MASTER.j2 |   108 +
 ...doop-metrics2-hbase.properties-GANGLIA-RS.j2 |   102 +
 .../package/templates/hbase-smoke.sh.j2         |    44 +
 .../package/templates/hbase_client_jaas.conf.j2 |    23 +
 .../templates/hbase_grant_permissions.j2        |    39 +
 .../package/templates/hbase_master_jaas.conf.j2 |    26 +
 .../templates/hbase_regionserver_jaas.conf.j2   |    26 +
 .../package/templates/regionservers.j2          |    20 +
 .../stacks/HDP/2.0.6/services/FLUME/alerts.json |    18 -
 .../services/FLUME/configuration/flume-conf.xml |    33 -
 .../services/FLUME/configuration/flume-env.xml  |    79 -
 .../HDP/2.0.6/services/FLUME/metainfo.xml       |    44 +-
 .../HDP/2.0.6/services/FLUME/metrics.json       |   716 -
 .../package/files/alert_flume_agent_status.py   |    99 -
 .../services/FLUME/package/scripts/flume.py     |   206 -
 .../FLUME/package/scripts/flume_check.py        |    40 -
 .../FLUME/package/scripts/flume_handler.py      |    79 -
 .../services/FLUME/package/scripts/params.py    |    80 -
 .../templates/flume-metrics2.properties.j2      |    22 -
 .../FLUME/package/templates/flume.conf.j2       |    24 -
 .../FLUME/package/templates/log4j.properties.j2 |    67 -
 .../HDP/2.0.6/services/GANGLIA/alerts.json      |   137 -
 .../GANGLIA/configuration/ganglia-env.xml       |    77 -
 .../HDP/2.0.6/services/GANGLIA/metainfo.xml     |   103 +-
 .../GANGLIA/package/files/checkGmetad.sh        |    37 -
 .../GANGLIA/package/files/checkGmond.sh         |    62 -
 .../GANGLIA/package/files/checkRrdcached.sh     |    34 -
 .../services/GANGLIA/package/files/gmetad.init  |    73 -
 .../services/GANGLIA/package/files/gmetadLib.sh |   204 -
 .../services/GANGLIA/package/files/gmond.init   |    73 -
 .../services/GANGLIA/package/files/gmondLib.sh  |   538 -
 .../GANGLIA/package/files/rrdcachedLib.sh       |    47 -
 .../GANGLIA/package/files/setupGanglia.sh       |   141 -
 .../GANGLIA/package/files/startGmetad.sh        |    68 -
 .../GANGLIA/package/files/startGmond.sh         |    85 -
 .../GANGLIA/package/files/startRrdcached.sh     |    79 -
 .../GANGLIA/package/files/stopGmetad.sh         |    43 -
 .../services/GANGLIA/package/files/stopGmond.sh |    54 -
 .../GANGLIA/package/files/stopRrdcached.sh      |    41 -
 .../GANGLIA/package/files/teardownGanglia.sh    |    28 -
 .../GANGLIA/package/scripts/functions.py        |    31 -
 .../services/GANGLIA/package/scripts/ganglia.py |    97 -
 .../GANGLIA/package/scripts/ganglia_monitor.py  |   243 -
 .../package/scripts/ganglia_monitor_service.py  |    27 -
 .../GANGLIA/package/scripts/ganglia_server.py   |   123 -
 .../package/scripts/ganglia_server_service.py   |    27 -
 .../services/GANGLIA/package/scripts/params.py  |   164 -
 .../GANGLIA/package/scripts/status_params.py    |    25 -
 .../GANGLIA/package/templates/ganglia.conf.j2   |    34 -
 .../package/templates/gangliaClusters.conf.j2   |    43 -
 .../GANGLIA/package/templates/gangliaEnv.sh.j2  |    46 -
 .../GANGLIA/package/templates/gangliaLib.sh.j2  |    85 -
 .../GANGLIA/package/templates/rrd.py.j2         |   361 -
 .../stacks/HDP/2.0.6/services/HBASE/alerts.json |   124 -
 .../services/HBASE/configuration/hbase-env.xml  |   135 -
 .../HBASE/configuration/hbase-log4j.xml         |   143 -
 .../HBASE/configuration/hbase-policy.xml        |    53 -
 .../services/HBASE/configuration/hbase-site.xml |   331 -
 .../HDP/2.0.6/services/HBASE/metainfo.xml       |   120 +-
 .../HDP/2.0.6/services/HBASE/metrics.json       | 13635 -----------------
 .../HBASE/package/files/draining_servers.rb     |   164 -
 .../HBASE/package/files/hbaseSmokeVerify.sh     |    34 -
 .../services/HBASE/package/scripts/__init__.py  |    19 -
 .../services/HBASE/package/scripts/functions.py |    40 -
 .../services/HBASE/package/scripts/hbase.py     |   158 -
 .../HBASE/package/scripts/hbase_client.py       |    43 -
 .../HBASE/package/scripts/hbase_decommission.py |    74 -
 .../HBASE/package/scripts/hbase_master.py       |    76 -
 .../HBASE/package/scripts/hbase_regionserver.py |    78 -
 .../HBASE/package/scripts/hbase_service.py      |    51 -
 .../services/HBASE/package/scripts/params.py    |   147 -
 .../HBASE/package/scripts/service_check.py      |    79 -
 .../HBASE/package/scripts/status_params.py      |    26 -
 .../services/HBASE/package/scripts/upgrade.py   |    49 -
 ...-metrics2-hbase.properties-GANGLIA-MASTER.j2 |   108 -
 ...doop-metrics2-hbase.properties-GANGLIA-RS.j2 |   102 -
 .../HBASE/package/templates/hbase-smoke.sh.j2   |    44 -
 .../package/templates/hbase_client_jaas.conf.j2 |    23 -
 .../templates/hbase_grant_permissions.j2        |    39 -
 .../package/templates/hbase_master_jaas.conf.j2 |    26 -
 .../templates/hbase_regionserver_jaas.conf.j2   |    26 -
 .../HBASE/package/templates/regionservers.j2    |    20 -
 .../python/stacks/2.0.6/FLUME/test_flume.py     |    79 +-
 .../stacks/2.0.6/FLUME/test_service_check.py    |     8 +-
 .../2.0.6/GANGLIA/test_ganglia_monitor.py       |    26 +-
 .../stacks/2.0.6/GANGLIA/test_ganglia_server.py |    20 +-
 .../stacks/2.0.6/HBASE/test_hbase_client.py     |    16 +-
 .../stacks/2.0.6/HBASE/test_hbase_master.py     |    57 +-
 .../2.0.6/HBASE/test_hbase_regionserver.py      |    39 +-
 .../2.0.6/HBASE/test_hbase_service_check.py     |    14 +-
 156 files changed, 20959 insertions(+), 20780 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/alerts.json b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/alerts.json
new file mode 100644
index 0000000..80244d2
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/alerts.json
@@ -0,0 +1,18 @@
+{
+  "FLUME": {
+    "service": [],
+    "FLUME_HANDLER": [
+      {
+        "name": "flume_agent_status",
+        "label": "Flume Agent Status",
+        "description": "This host-level alert is triggerd if any of the expected flume agent processes are not available.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "SCRIPT",
+          "path": "HDP/2.0.6/services/FLUME/package/files/alert_flume_agent_status.py"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/configuration/flume-conf.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/configuration/flume-conf.xml b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/configuration/flume-conf.xml
new file mode 100644
index 0000000..8ff764b
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/configuration/flume-conf.xml
@@ -0,0 +1,33 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+  <property>
+    <name>content</name>
+    <description>Flume agent configurations.
+    The configuration specified here applies to all hosts in this configuration group. Please use host-specific configuration groups to provide different configurations on different hosts.
+    To configure multiple agents on the same host, provide the combined agent configurations here; each agent must have a different name.</description>
+    <value>
+# Flume agent config
+    </value>
+  </property>
+</configuration>
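
Since the content above may combine the configuration of several agents, each agent name appears as the first dotted segment of its property keys. A small illustrative parser (a hypothetical helper, not part of Ambari) that recovers the distinct agent names:

    def flume_agent_names(conf_text):
        """Collect distinct agent names from combined flume-conf content."""
        names = set()
        for line in conf_text.splitlines():
            line = line.strip()
            # Skip blanks and comments; keys look like '<agent>.<section>... = ...'
            if not line or line.startswith('#') or '=' not in line:
                continue
            key = line.split('=', 1)[0].strip()
            if '.' in key:
                names.add(key.split('.', 1)[0])
        return sorted(names)

    # flume_agent_names("a1.sources = r1\na2.sinks = k1") == ['a1', 'a2']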

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/configuration/flume-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/configuration/flume-env.xml b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/configuration/flume-env.xml
new file mode 100644
index 0000000..bb56f9c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/configuration/flume-env.xml
@@ -0,0 +1,79 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>flume_conf_dir</name>
+    <value>/etc/flume/conf</value>
+    <description>Location to save configuration files</description>
+  </property>
+  <property>
+    <name>flume_log_dir</name>
+    <value>/var/log/flume</value>
+    <description>Location to save log files</description>
+  </property>
+  <property>
+    <name>flume_user</name>
+    <value>flume</value>
+    <property-type>USER</property-type>
+    <description>Flume User</description>
+  </property>
+
+  <!-- flume-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for flume-env.sh file</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# If this file is placed at FLUME_CONF_DIR/flume-env.sh, it will be sourced
+# during Flume startup.
+
+# Environment variables can be set here.
+
+export JAVA_HOME={{java_home}}
+
+# Give Flume more memory and pre-allocate, enable remote monitoring via JMX
+# export JAVA_OPTS="-Xms100m -Xmx2000m -Dcom.sun.management.jmxremote"
+
+# Note that the Flume conf directory is always included in the classpath.
+#TODO temporary addition
+export FLUME_CLASSPATH=$FLUME_CLASSPATH:/usr/lib/flume/lib/*
+
+export HIVE_HOME={{flume_hive_home}}
+export HCAT_HOME={{flume_hcat_home}}
+    </value>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/metainfo.xml
new file mode 100644
index 0000000..cb05b02
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/metainfo.xml
@@ -0,0 +1,68 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>FLUME</name>
+      <displayName>Flume</displayName>
+      <comment>A distributed service for collecting, aggregating, and moving large amounts of streaming data into HDFS</comment>
+      <version>1.4.0.2.0</version>
+      <components>
+        <component>
+          <name>FLUME_HANDLER</name>
+          <displayName>Flume</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/flume_handler.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>flume</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/flume_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+      
+      <requiredServices>
+        <service>HDFS</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>flume-env</config-type>
+        <config-type>flume-conf</config-type>
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>
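
For reference, the service/component wiring declared in a metainfo.xml like the one above can be inspected with a few lines of ElementTree (an illustrative reader, not Ambari code):

    import xml.etree.ElementTree as ET

    def describe_service(metainfo_path):
        root = ET.parse(metainfo_path).getroot()
        for service in root.findall('./services/service'):
            print(service.findtext('name'))  # e.g. FLUME
            for comp in service.findall('./components/component'):
                # Component name, category and the script that drives it.
                print('  %s (%s): %s' % (comp.findtext('name'),
                                         comp.findtext('category'),
                                         comp.findtext('./commandScript/script')))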

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/metrics.json b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/metrics.json
new file mode 100644
index 0000000..2114c12
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/metrics.json
@@ -0,0 +1,716 @@
+{
+  "FLUME_HANDLER": {
+    "Component": [
+        {
+        "type": "ganglia",
+        "metrics": {
+          "metrics/boottime":{
+            "metric":"boottime",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/cpu/cpu_aidle":{
+            "metric":"cpu_aidle",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/cpu/cpu_idle":{
+            "metric":"cpu_idle",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/cpu/cpu_nice":{
+            "metric":"cpu_nice",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/cpu/cpu_num":{
+            "metric":"cpu_num",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/cpu/cpu_speed":{
+            "metric":"cpu_speed",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/cpu/cpu_system":{
+            "metric":"cpu_system",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/cpu/cpu_user":{
+            "metric":"cpu_user",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/cpu/cpu_wio":{
+            "metric":"cpu_wio",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/disk/disk_free":{
+            "metric":"disk_free",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/disk/disk_total":{
+            "metric":"disk_total",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/disk/part_max_used":{
+            "metric":"part_max_used",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/gcCount":{
+            "metric":"jvm.metrics.gcCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/gcTimeMillis":{
+            "metric":"jvm.metrics.gcTimeMillis",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/logError":{
+            "metric":"jvm.metrics.logError",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/logFatal":{
+            "metric":"jvm.metrics.logFatal",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/logInfo":{
+            "metric":"jvm.metrics.logInfo",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/logWarn":{
+            "metric":"jvm.metrics.logWarn",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/maxMemoryM":{
+            "metric":"jvm.metrics.maxMemoryM",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/memHeapCommittedM":{
+            "metric":"jvm.metrics.memHeapCommittedM",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/memHeapUsedM":{
+            "metric":"jvm.metrics.memHeapUsedM",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/memNonHeapCommittedM":{
+            "metric":"jvm.metrics.memNonHeapCommittedM",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/memNonHeapUsedM":{
+            "metric":"jvm.metrics.memNonHeapUsedM",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/threadsBlocked":{
+            "metric":"jvm.metrics.threadsBlocked",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/threadsNew":{
+            "metric":"jvm.metrics.threadsNew",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/threadsRunnable":{
+            "metric":"jvm.metrics.threadsRunnable",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/threadsTerminated":{
+            "metric":"jvm.metrics.threadsTerminated",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/threadsTimedWaiting":{
+            "metric":"jvm.metrics.threadsTimedWaiting",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/threadsWaiting":{
+            "metric":"jvm.metrics.threadsWaiting",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/CHANNEL/$2/ChannelCapacity":{
+            "metric":"(\\w+).CHANNEL.(\\w+).ChannelCapacity",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/CHANNEL/$2/StartTime":{
+            "metric":"(\\w+).CHANNEL.(\\w+).StartTime",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/CHANNEL/$2/EventTakeAttemptCount":{
+            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeAttemptCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/CHANNEL/$2/EventTakeSuccessCount":{
+            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/CHANNEL/$2/EventPutAttemptCount":{
+            "metric":"(\\w+).CHANNEL.(\\w+).EventPutAttemptCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/CHANNEL/$2/StopTime":{
+            "metric":"(\\w+).CHANNEL.(\\w+).StopTime",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/CHANNEL/$2/ChannelFillPercentage":{
+            "metric":"(\\w+).CHANNEL.(\\w+).ChannelFillPercentage",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/CHANNEL/$2/ChannelSize":{
+            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/CHANNEL/$2/EventPutSuccessCount":{
+            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SINK/$2/ConnectionCreatedCount":{
+            "metric":"(\\w+).SINK.(\\w+).ConnectionCreatedCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SINK/$2/BatchCompleteCount":{
+            "metric":"(\\w+).SINK.(\\w+).BatchCompleteCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SINK/$2/EventDrainSuccessCount":{
+            "metric":"(\\w+).SINK.(\\w+).EventDrainSuccessCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SINK/$2/StartTime":{
+            "metric":"(\\w+).SINK.(\\w+).StartTime",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SINK/$2/EventDrainAttemptCount":{
+            "metric":"(\\w+).SINK.(\\w+).EventDrainAttemptCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SINK/$2/ConnectionFailedCount":{
+            "metric":"(\\w+).SINK.(\\w+).ConnectionFailedCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SINK/$2/BatchUnderflowCount":{
+            "metric":"(\\w+).SINK.(\\w+).BatchUnderflowCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SINK/$2/ConnectionClosedCount":{
+            "metric":"(\\w+).SINK.(\\w+).ConnectionClosedCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SINK/$2/StopTime":{
+            "metric":"(\\w+).SINK.(\\w+).StopTime",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SINK/$2/BatchEmptyCount":{
+            "metric":"(\\w+).SINK.(\\w+).BatchEmptyCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SOURCE/$2/AppendBatchReceivedCount":{
+            "metric":"(\\w+).SOURCE.(\\w+).AppendBatchReceivedCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SOURCE/$2/AppendAcceptedCount":{
+            "metric":"(\\w+).SOURCE.(\\w+).AppendAcceptedCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SOURCE/$2/StartTime":{
+            "metric":"(\\w+).SOURCE.(\\w+).StartTime",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SOURCE/$2/OpenConnectionCount":{
+            "metric":"(\\w+).SOURCE.(\\w+).OpenConnectionCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SOURCE/$2/AppendBatchAcceptedCount":{
+            "metric":"(\\w+).SOURCE.(\\w+).AppendBatchAcceptedCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SOURCE/$2/AppendReceivedCount":{
+            "metric":"(\\w+).SOURCE.(\\w+).AppendReceivedCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SOURCE/$2/EventReceivedCount":{
+            "metric":"(\\w+).SOURCE.(\\w+).EventReceivedCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SOURCE/$2/StopTime":{
+            "metric":"(\\w+).SOURCE.(\\w+).StopTime",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SOURCE/$2/EventAcceptedCount":{
+            "metric":"(\\w+).SOURCE.(\\w+).EventAcceptedCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/min": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._min",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/max": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._max",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/avg": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._avg",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/sum": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._sum",
+            "pointInTime":false,
+            "temporal":true
+          },
+
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/avg": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._avg",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/max": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._max",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/min": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._min",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/sum": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._sum",
+            "pointInTime":false,
+            "temporal":true
+          },
+          
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/avg": {
+            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._avg",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/max": {
+            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._max",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/min": {
+            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._min",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/sum": {
+            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._sum",
+            "pointInTime":false,
+            "temporal":true
+          }
+
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "metrics/boottime":{
+            "metric":"boottime",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/cpu/cpu_aidle":{
+            "metric":"cpu_aidle",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/cpu/cpu_idle":{
+            "metric":"cpu_idle",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/cpu/cpu_nice":{
+            "metric":"cpu_nice",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/cpu/cpu_num":{
+            "metric":"cpu_num",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/cpu/cpu_speed":{
+            "metric":"cpu_speed",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/cpu/cpu_system":{
+            "metric":"cpu_system",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/cpu/cpu_user":{
+            "metric":"cpu_user",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/cpu/cpu_wio":{
+            "metric":"cpu_wio",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/disk/disk_free":{
+            "metric":"disk_free",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/disk/disk_total":{
+            "metric":"disk_total",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/disk/part_max_used":{
+            "metric":"part_max_used",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/gcCount":{
+            "metric":"jvm.metrics.gcCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/gcTimeMillis":{
+            "metric":"jvm.metrics.gcTimeMillis",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/logError":{
+            "metric":"jvm.metrics.logError",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/logFatal":{
+            "metric":"jvm.metrics.logFatal",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/logInfo":{
+            "metric":"jvm.metrics.logInfo",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/logWarn":{
+            "metric":"jvm.metrics.logWarn",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/maxMemoryM":{
+            "metric":"jvm.metrics.maxMemoryM",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/memHeapCommittedM":{
+            "metric":"jvm.metrics.memHeapCommittedM",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/memHeapUsedM":{
+            "metric":"jvm.metrics.memHeapUsedM",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/memNonHeapCommittedM":{
+            "metric":"jvm.metrics.memNonHeapCommittedM",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/memNonHeapUsedM":{
+            "metric":"jvm.metrics.memNonHeapUsedM",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/threadsBlocked":{
+            "metric":"jvm.metrics.threadsBlocked",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/threadsNew":{
+            "metric":"jvm.metrics.threadsNew",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/threadsRunnable":{
+            "metric":"jvm.metrics.threadsRunnable",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/threadsTerminated":{
+            "metric":"jvm.metrics.threadsTerminated",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/threadsTimedWaiting":{
+            "metric":"jvm.metrics.threadsTimedWaiting",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/jvm/threadsWaiting":{
+            "metric":"jvm.metrics.threadsWaiting",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/CHANNEL/$2/ChannelCapacity":{
+            "metric":"(\\w+).CHANNEL.(\\w+).ChannelCapacity",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/CHANNEL/$2/StartTime":{
+            "metric":"(\\w+).CHANNEL.(\\w+).StartTime",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/CHANNEL/$2/EventTakeAttemptCount":{
+            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeAttemptCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/CHANNEL/$2/EventTakeSuccessCount":{
+            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/CHANNEL/$2/EventPutAttemptCount":{
+            "metric":"(\\w+).CHANNEL.(\\w+).EventPutAttemptCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/CHANNEL/$2/StopTime":{
+            "metric":"(\\w+).CHANNEL.(\\w+).StopTime",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/CHANNEL/$2/ChannelFillPercentage":{
+            "metric":"(\\w+).CHANNEL.(\\w+).ChannelFillPercentage",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/CHANNEL/$2/ChannelSize":{
+            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/CHANNEL/$2/EventPutSuccessCount":{
+            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SINK/$2/ConnectionCreatedCount":{
+            "metric":"(\\w+).SINK.(\\w+).ConnectionCreatedCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SINK/$2/BatchCompleteCount":{
+            "metric":"(\\w+).SINK.(\\w+).BatchCompleteCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SINK/$2/EventDrainSuccessCount":{
+            "metric":"(\\w+).SINK.(\\w+).EventDrainSuccessCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SINK/$2/StartTime":{
+            "metric":"(\\w+).SINK.(\\w+).StartTime",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SINK/$2/EventDrainAttemptCount":{
+            "metric":"(\\w+).SINK.(\\w+).EventDrainAttemptCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SINK/$2/ConnectionFailedCount":{
+            "metric":"(\\w+).SINK.(\\w+).ConnectionFailedCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SINK/$2/BatchUnderflowCount":{
+            "metric":"(\\w+).SINK.(\\w+).BatchUnderflowCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SINK/$2/ConnectionClosedCount":{
+            "metric":"(\\w+).SINK.(\\w+).ConnectionClosedCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SINK/$2/StopTime":{
+            "metric":"(\\w+).SINK.(\\w+).StopTime",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SINK/$2/BatchEmptyCount":{
+            "metric":"(\\w+).SINK.(\\w+).BatchEmptyCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SOURCE/$2/AppendBatchReceivedCount":{
+            "metric":"(\\w+).SOURCE.(\\w+).AppendBatchReceivedCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SOURCE/$2/AppendAcceptedCount":{
+            "metric":"(\\w+).SOURCE.(\\w+).AppendAcceptedCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SOURCE/$2/StartTime":{
+            "metric":"(\\w+).SOURCE.(\\w+).StartTime",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SOURCE/$2/OpenConnectionCount":{
+            "metric":"(\\w+).SOURCE.(\\w+).OpenConnectionCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SOURCE/$2/AppendBatchAcceptedCount":{
+            "metric":"(\\w+).SOURCE.(\\w+).AppendBatchAcceptedCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SOURCE/$2/AppendReceivedCount":{
+            "metric":"(\\w+).SOURCE.(\\w+).AppendReceivedCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SOURCE/$2/EventReceivedCount":{
+            "metric":"(\\w+).SOURCE.(\\w+).EventReceivedCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SOURCE/$2/StopTime":{
+            "metric":"(\\w+).SOURCE.(\\w+).StopTime",
+            "pointInTime":true,
+            "temporal":true
+          },
+          "metrics/flume/$1/SOURCE/$2/EventAcceptedCount":{
+            "metric":"(\\w+).SOURCE.(\\w+).EventAcceptedCount",
+            "pointInTime":true,
+            "temporal":true
+          },
+
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/avg": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._avg",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/max": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._max",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/min": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._min",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/sum": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._sum",
+            "pointInTime":false,
+            "temporal":true
+          },
+
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/avg": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._avg",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/max": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._max",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/min": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._min",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/sum": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._sum",
+            "pointInTime":false,
+            "temporal":true
+          },
+          
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/avg": {
+            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._avg",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/max": {
+            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._max",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/min": {
+            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._min",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/sum": {
+            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._sum",
+            "pointInTime":false,
+            "temporal":true
+          }
+          
+        }
+      }
+    ]
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/files/alert_flume_agent_status.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/files/alert_flume_agent_status.py b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/files/alert_flume_agent_status.py
new file mode 100644
index 0000000..b183bbc
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/files/alert_flume_agent_status.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import socket
+
+from resource_management.libraries.functions.flume_agent_helper import find_expected_agent_names
+from resource_management.libraries.functions.flume_agent_helper import get_flume_status
+
+RESULT_CODE_OK = 'OK'
+RESULT_CODE_CRITICAL = 'CRITICAL'
+RESULT_CODE_UNKNOWN = 'UNKNOWN'
+
+FLUME_CONF_DIR_KEY = '{{flume-env/flume_conf_dir}}'
+
+FLUME_RUN_DIR = '/var/run/flume'
+
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (FLUME_CONF_DIR_KEY,)
+  
+
+def execute(parameters=None, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  parameters (dictionary): a mapping of parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+
+  if parameters is None:
+    return (RESULT_CODE_UNKNOWN, ['There were no parameters supplied to the script.'])
+
+  flume_conf_directory = None
+  if FLUME_CONF_DIR_KEY in parameters:
+    flume_conf_directory = parameters[FLUME_CONF_DIR_KEY]
+
+  if flume_conf_directory is None:
+    return (RESULT_CODE_UNKNOWN, ['The Flume configuration directory is a required parameter.'])
+
+  if host_name is None:
+    host_name = socket.getfqdn()
+
+  processes = get_flume_status(flume_conf_directory, FLUME_RUN_DIR)
+  expected_agents = find_expected_agent_names(flume_conf_directory)
+
+  alert_label = ''
+  alert_state = RESULT_CODE_OK
+
+  if len(processes) == 0 and len(expected_agents) == 0:
+    alert_label = 'No agents defined on {0}'.format(host_name)
+  else:
+    ok = []
+    critical = []
+    text_arr = []
+
+    for process in processes:
+      if 'status' not in process or process['status'] == 'NOT_RUNNING':
+        critical.append(process['name'])
+      else:
+        ok.append(process['name'])
+
+    if len(critical) > 0:
+      text_arr.append("{0} {1} NOT running".format(", ".join(critical),
+        "is" if len(critical) == 1 else "are"))
+
+    if len(ok) > 0:
+      text_arr.append("{0} {1} running".format(", ".join(ok),
+        "is" if len(ok) == 1 else "are"))
+
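+    # e.g. "Agent a1 is NOT running and b1 is running on <host_name>"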
+    plural = len(critical) > 1 or len(ok) > 1
+    alert_label = "Agent{0} {1} {2}".format(
+      "s" if plural else "",
+      " and ".join(text_arr),
+      "on " + host_name)
+
+    alert_state = RESULT_CODE_CRITICAL if len(critical) > 0 else RESULT_CODE_OK
+
+  return (alert_state, [alert_label])
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/flume.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/flume.py b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/flume.py
new file mode 100644
index 0000000..f4870ad
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/flume.py
@@ -0,0 +1,206 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import glob
+import json
+import os
+from resource_management import *
+from resource_management.libraries.functions.flume_agent_helper import is_flume_process_live
+from resource_management.libraries.functions.flume_agent_helper import find_expected_agent_names
+
+def flume(action = None):
+  import params
+
+  if action == 'config':
+    # remove previously defined meta files
+    for n in find_expected_agent_names(params.flume_conf_dir):
+      os.unlink(os.path.join(params.flume_conf_dir, n, 'ambari-meta.json'))
+
+    Directory(params.flume_conf_dir, recursive=True)
+    Directory(params.flume_log_dir, owner=params.flume_user)
+
+    flume_agents = {}
+    if params.flume_conf_content is not None:
+      flume_agents = build_flume_topology(params.flume_conf_content)
+
+    for agent in flume_agents.keys():
+      flume_agent_conf_dir = os.path.join(params.flume_conf_dir, agent)
+      flume_agent_conf_file = os.path.join(flume_agent_conf_dir, 'flume.conf')
+      flume_agent_meta_file = os.path.join(flume_agent_conf_dir, 'ambari-meta.json')
+      flume_agent_log4j_file = os.path.join(flume_agent_conf_dir, 'log4j.properties')
+      flume_agent_env_file = os.path.join(flume_agent_conf_dir, 'flume-env.sh')
+
+      Directory(flume_agent_conf_dir)
+
+      PropertiesFile(flume_agent_conf_file,
+        properties=flume_agents[agent],
+        mode = 0644)
+
+      File(flume_agent_log4j_file,
+        content=Template('log4j.properties.j2', agent_name = agent),
+        mode = 0644)
+
+      File(flume_agent_meta_file,
+        content = json.dumps(ambari_meta(agent, flume_agents[agent])),
+        mode = 0644)
+
+      File(flume_agent_env_file,
+           owner=params.flume_user,
+           content=InlineTemplate(params.flume_env_sh_template)
+      )
+
+      if params.has_metric_collector:
+        File(os.path.join(flume_agent_conf_dir, "flume-metrics2.properties"),
+             owner=params.flume_user,
+             content=Template("flume-metrics2.properties.j2")
+        )
+
+  elif action == 'start':
+    # desired state for service should be STARTED
+    if len(params.flume_command_targets) == 0:
+      _set_desired_state('STARTED')
+
+    # It is important to run this command as a background process.
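+    # format() resolves {flume_bin} now; the doubled braces ({{0}}..{{3}})
+    # survive as {0}..{3} placeholders for the per-agent .format() call
+    # below, and the trailing " &" backgrounds the agent.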
+
+    flume_base = as_user(format("{flume_bin} agent --name {{0}} --conf {{1}} --conf-file {{2}} {{3}}"), params.flume_user, env={'JAVA_HOME': params.java_home}) + " &"
+
+    for agent in cmd_target_names():
+      flume_agent_conf_dir = params.flume_conf_dir + os.sep + agent
+      flume_agent_conf_file = flume_agent_conf_dir + os.sep + "flume.conf"
+      flume_agent_pid_file = params.flume_run_dir + os.sep + agent + ".pid"
+
+      if not os.path.isfile(flume_agent_conf_file):
+        continue
+
+      if not is_flume_process_live(flume_agent_pid_file):
+        # TODO someday make the ganglia ports configurable
+        extra_args = ''
+        if params.ganglia_server_host is not None:
+          extra_args = '-Dflume.monitoring.type=ganglia -Dflume.monitoring.hosts={0}:{1}'
+          extra_args = extra_args.format(params.ganglia_server_host, '8655')
+        if params.has_metric_collector:
+          extra_args = '-Dflume.monitoring.type=org.apache.hadoop.metrics2.sink.flume.FlumeTimelineMetricsSink'
+
+        flume_cmd = flume_base.format(agent, flume_agent_conf_dir,
+           flume_agent_conf_file, extra_args)
+
+        Execute(flume_cmd, 
+          wait_for_finish=False,
+          environment={'JAVA_HOME': params.java_home}
+        )
+
+        # sometimes startup spawns a couple of threads - so only the first line may count
+        pid_cmd = format('pgrep -o -u {flume_user} -f ^{java_home}.*{agent}.* > {flume_agent_pid_file}')
+        Execute(pid_cmd, logoutput=True, tries=20, try_sleep=6)
+
+    pass
+  elif action == 'stop':
+    # desired state for service should be INSTALLED
+    if len(params.flume_command_targets) == 0:
+      _set_desired_state('INSTALLED')
+
+    pid_files = glob.glob(params.flume_run_dir + os.sep + "*.pid")
+
+    if 0 == len(pid_files):
+      return
+
+    agent_names = cmd_target_names()
+
+    for agent in agent_names:
+      pid_file = params.flume_run_dir + os.sep + agent + '.pid'
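+      # 'pid' is a shell fragment, so the executed command expands to:
+      #   kill `cat <pid_file>` > /dev/null 2>&1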
+      pid = format('`cat {pid_file}` > /dev/null 2>&1')
+      Execute(format('kill {pid}'), ignore_failures=True)
+      File(pid_file, action = 'delete')
+
+
+def ambari_meta(agent_name, agent_conf):
+  res = {}
+
+  sources = agent_conf[agent_name + '.sources'].split(' ')
+  res['sources_count'] = len(sources)
+
+  sinks = agent_conf[agent_name + '.sinks'].split(' ')
+  res['sinks_count'] = len(sinks)
+
+  channels = agent_conf[agent_name + '.channels'].split(' ')
+  res['channels_count'] = len(channels)
+
+  return res
+
+
+# returns a map of dictionaries keyed by agent name; each dictionary
+# holds that agent's name/value property pairs
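+# e.g. the two lines "a1.sources = r1" and "a1.sources.r1.type = seq"
+# yield {'a1': {'a1.sources': 'r1', 'a1.sources.r1.type': 'seq'}}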
+def build_flume_topology(content):
+
+  result = {}
+  agent_names = []
+
+  for line in content.split('\n'):
+    rline = line.strip()
+    if 0 != len(rline) and not rline.startswith('#'):
+      pair = rline.split('=')
+      lhs = pair[0].strip()
+      rhs = pair[1].strip()
+
+      part0 = lhs.split('.')[0]
+
+      if lhs.endswith(".sources"):
+        agent_names.append(part0)
+
+      if part0 not in result:
+        result[part0] = {}
+
+      result[part0][lhs] = rhs
+
+  # trim out non-agents
+  for k in result.keys():
+    if not k in agent_names:
+      del result[k]
+
+  return result
+
+
+def cmd_target_names():
+  import params
+
+  if len(params.flume_command_targets) > 0:
+    return params.flume_command_targets
+  else:
+    return find_expected_agent_names(params.flume_conf_dir)
+
+
+def _set_desired_state(state):
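+  # persist the desired state (STARTED/INSTALLED) so status() can report
+  # correctly when no agents are defined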
+  import params
+  filename = os.path.join(params.flume_run_dir, 'ambari-state.txt')
+  File(filename,
+    content = state,
+  )
+
+
+def get_desired_state():
+  import params
+
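+  # default to INSTALLED if the state file is missing or unreadable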
+  try:
+    with open(os.path.join(params.flume_run_dir, 'ambari-state.txt'), 'r') as fp:
+      return fp.read()
+  except:
+    return 'INSTALLED'
+  

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/flume_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/flume_check.py b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/flume_check.py
new file mode 100644
index 0000000..b93b8e8
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/flume_check.py
@@ -0,0 +1,40 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+class FlumeServiceCheck(Script):
+
+  def service_check(self, env):
+    import params
+
+    env.set_params(params)
+    if params.security_enabled:
+      principal_replaced = params.http_principal.replace("_HOST", params.hostname)
+      Execute(format("{kinit_path_local} -kt {http_keytab} {principal_replaced}"),
+              user=params.smoke_user)
+
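+    # a lightweight smoke test: 'flume-ng version' fails if the
+    # installation or JAVA_HOME is broken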
+    Execute(format('env JAVA_HOME={java_home} {flume_bin} version'),
+            logoutput=True,
+            tries = 3,
+            try_sleep = 20)
+
+if __name__ == "__main__":
+  FlumeServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/flume_handler.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/flume_handler.py b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/flume_handler.py
new file mode 100644
index 0000000..a13f507
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/flume_handler.py
@@ -0,0 +1,79 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from flume import flume
+from flume import get_desired_state
+
+from resource_management import *
+from resource_management.libraries.functions.flume_agent_helper import find_expected_agent_names
+from resource_management.libraries.functions.flume_agent_helper import get_flume_status
+
+class FlumeHandler(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+
+  def start(self, env, rolling_restart=False):
+    import params
+
+    env.set_params(params)
+    self.configure(env)
+
+    flume(action='start')
+
+  def stop(self, env, rolling_restart=False):
+    import params
+
+    env.set_params(params)
+
+    flume(action='stop')
+
+  def configure(self, env):
+    import params
+
+    env.set_params(params)
+
+    flume(action='config')
+
+  def status(self, env):
+    import params
+    env.set_params(params)
+
+    processes = get_flume_status(params.flume_conf_dir, params.flume_run_dir)
+    expected_agents = find_expected_agent_names(params.flume_conf_dir)
+
+    json = {}
+    json['processes'] = processes
+    self.put_structured_out(json)
+
+    # only throw an exception if there are agents defined and there is a
+    # problem with the processes; if there are no agents defined, then the
+    # service should report STARTED (green) only if the desired state is
+    # STARTED; otherwise, INSTALLED (red)
+    if len(expected_agents) > 0:
+      for proc in processes:
+        if 'status' not in proc or proc['status'] == 'NOT_RUNNING':
+          raise ComponentIsNotRunning()
+    elif len(expected_agents) == 0 and 'INSTALLED' == get_desired_state():
+      raise ComponentIsNotRunning()
+
+if __name__ == "__main__":
+  FlumeHandler().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
new file mode 100644
index 0000000..2997867
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
@@ -0,0 +1,80 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management import *
+
+config = Script.get_config()
+
+user_group = config['configurations']['cluster-env']['user_group']
+proxyuser_group = config['configurations']['hadoop-env']['proxyuser_group']
+
+security_enabled = False
+
+hdp_stack_version = str(config['hostLevelParams']['stack_version'])
+hdp_stack_version = format_hdp_stack_version(hdp_stack_version)
+
+#hadoop params
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+  flume_bin = '/usr/hdp/current/flume-server/bin/flume-ng'
+  flume_hive_home = '/usr/hdp/current/hive-metastore'
+  flume_hcat_home = '/usr/hdp/current/hive-webhcat'
+else:
+  flume_bin = '/usr/bin/flume-ng'
+  flume_hive_home = '/usr/lib/hive'
+  flume_hcat_home = '/usr/lib/hive-hcatalog'
+
+flume_conf_dir = '/etc/flume/conf'
+java_home = config['hostLevelParams']['java_home']
+flume_log_dir = '/var/log/flume'
+flume_run_dir = '/var/run/flume'
+flume_user = 'flume'
+flume_group = 'flume'
+
+if 'flume-env' in config['configurations'] and 'flume_user' in config['configurations']['flume-env']:
+  flume_user = config['configurations']['flume-env']['flume_user']
+
+if ('flume-conf' in config['configurations']) and ('content' in config['configurations']['flume-conf']):
+  flume_conf_content = config['configurations']['flume-conf']['content']
+else:
+  flume_conf_content = None
+
+if ('flume-log4j' in config['configurations']) and ('content' in config['configurations']['flume-log4j']):
+  flume_log4j_content = config['configurations']['flume-log4j']['content']
+else:
+  flume_log4j_content = None
+
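+# comma-separated agent names targeted by the current command; an empty
+# list means the command applies to every agent defined on this host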
+targets = default('/commandParams/flume_handler', None)
+flume_command_targets = [] if targets is None else targets.split(',')
+
+flume_env_sh_template = config['configurations']['flume-env']['content']
+
+ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', [])
+ganglia_server_host = None
+if 0 != len(ganglia_server_hosts):
+  ganglia_server_host = ganglia_server_hosts[0]
+
+hostname = None
+if 'hostname' in config:
+  hostname = config['hostname']
+
+ams_collector_hosts = default("/clusterHostInfo/metric_collector_hosts", [])
+has_metric_collector = len(ams_collector_hosts) > 0
+if has_metric_collector:
+  metric_collector_host = ams_collector_hosts[0]

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2 b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2
new file mode 100644
index 0000000..7458bf8
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume-metrics2.properties.j2
@@ -0,0 +1,22 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+collector={{metric_collector_host}}:8188
+collectionFrequency=60000
+maxRowCacheSize=10000
+sendInterval=59000
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume.conf.j2 b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume.conf.j2
new file mode 100644
index 0000000..4dee67f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/flume.conf.j2
@@ -0,0 +1,24 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+# flume.conf: Add your flume configuration here and start flume
+#             Note: if you are using the Windows service or Unix service
+#             provided by the HDP distribution, it will assume the
+#             agent's name in this file to be 'a1'
+#
+{{flume_agent_conf_content}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/log4j.properties.j2 b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/log4j.properties.j2
new file mode 100644
index 0000000..3b34db8
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/templates/log4j.properties.j2
@@ -0,0 +1,67 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# Define some default values that can be overridden by system properties.
+#
+# For testing, it may also be convenient to specify
+# -Dflume.root.logger=DEBUG,console when launching flume.
+
+#flume.root.logger=DEBUG,console
+flume.root.logger=INFO,LOGFILE
+flume.log.dir={{flume_log_dir}}
+flume.log.file=flume-{{agent_name}}.log
+
+log4j.logger.org.apache.flume.lifecycle = INFO
+log4j.logger.org.jboss = WARN
+log4j.logger.org.mortbay = INFO
+log4j.logger.org.apache.avro.ipc.NettyTransceiver = WARN
+log4j.logger.org.apache.hadoop = INFO
+
+# Define the root logger to the system property "flume.root.logger".
+log4j.rootLogger=${flume.root.logger}
+
+
+# Stock log4j rolling file appender
+# Default log rotation configuration
+log4j.appender.LOGFILE=org.apache.log4j.RollingFileAppender
+log4j.appender.LOGFILE.MaxFileSize=100MB
+log4j.appender.LOGFILE.MaxBackupIndex=10
+log4j.appender.LOGFILE.File=${flume.log.dir}/${flume.log.file}
+log4j.appender.LOGFILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.LOGFILE.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss,SSS} %-5p [%t] (%C.%M:%L) %x - %m%n
+
+
+# Warning: If you enable the following appender it will fill up your disk if you don't have a cleanup job!
+# This uses the updated rolling file appender from log4j-extras that supports a reliable time-based rolling policy.
+# See http://logging.apache.org/log4j/companions/extras/apidocs/org/apache/log4j/rolling/TimeBasedRollingPolicy.html
+# Add "DAILY" to flume.root.logger above if you want to use this
+log4j.appender.DAILY=org.apache.log4j.rolling.RollingFileAppender
+log4j.appender.DAILY.rollingPolicy=org.apache.log4j.rolling.TimeBasedRollingPolicy
+log4j.appender.DAILY.rollingPolicy.ActiveFileName=${flume.log.dir}/${flume.log.file}
+log4j.appender.DAILY.rollingPolicy.FileNamePattern=${flume.log.dir}/${flume.log.file}.%d{yyyy-MM-dd}
+log4j.appender.DAILY.layout=org.apache.log4j.PatternLayout
+log4j.appender.DAILY.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss,SSS} %-5p [%t] (%C.%M:%L) %x - %m%n
+
+
+# console
+# Add "console" to flume.root.logger above if you want to use this
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d (%t) [%p - %l] %m%n

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/alerts.json b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/alerts.json
new file mode 100644
index 0000000..7605787
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/alerts.json
@@ -0,0 +1,137 @@
+{
+  "GANGLIA": {
+    "service": [],
+    "GANGLIA_SERVER": [
+      {
+        "name": "ganglia_server_process",
+        "label": "Ganglia Server Process",
+        "description": "This host-level alert is triggered if the Ganglia server process cannot be established to be up and listening on the network.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "8651",
+          "default_port": 8651,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      },
+      {
+        "name": "ganglia_monitor_hdfs_namenode",
+        "label": "Ganglia NameNode Process Monitor",
+        "description": "This host-level alert is triggered if the Ganglia gmond process which handles receiving metrics for HDFS NameNode is not up and listening.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "8661",
+          "default_port": 8661,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      },
+      {
+        "name": "ganglia_monitor_hbase_master",
+        "label": "Ganglia HBase Master Process Monitor",
+        "description": "This host-level alert is triggered if the Ganglia gmond process which handles receiving metrics for the HBase Master process is not up and listening.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "8663",
+          "default_port": 8663,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      },
+      {
+        "name": "ganglia_monitor_yarn_resourcemanager",
+        "label": "Ganglia ResourceManager Process Monitor",
+        "description": "This host-level alert is triggered if the Ganglia gmond process which handles receiving metrics for the YARN ResourceManager process is not up and listening.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "8664",
+          "default_port": 8664,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      },
+      {
+        "name": "ganglia_monitor_mapreduce_history_server",
+        "label": "Ganglia History Server Process Monitor",
+        "description": "This host-level alert is triggered if the Ganglia gmond process which handles receiving metrics for the MapReduce History Server process is not up and listening.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "8666",
+          "default_port": 8666,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/configuration/ganglia-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/configuration/ganglia-env.xml b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/configuration/ganglia-env.xml
new file mode 100644
index 0000000..3328acf
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/configuration/ganglia-env.xml
@@ -0,0 +1,77 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>ganglia_conf_dir</name>
+    <value>/etc/ganglia/hdp</value>
+    <description>Config directory for Ganglia</description>
+  </property>
+  <property>
+    <name>ganglia_runtime_dir</name>
+    <value>/var/run/ganglia/hdp</value>
+    <description>Run directories for Ganglia</description>
+  </property>
+  <property>
+    <name>gmetad_user</name>
+    <value>nobody</value>
+    <property-type>USER GROUP</property-type>
+    <description>The user used to run gmetad</description>
+  </property>
+  <property>
+    <name>gmond_user</name>
+    <value>nobody</value>
+    <property-type>USER GROUP</property-type>
+    <description>The user used to run gmond</description>
+  </property>
+  <property>
+    <name>rrdcached_base_dir</name>
+    <value>/var/lib/ganglia/rrds</value>
+    <description>Default directory for saving the rrd files on ganglia server</description>
+  </property>
+  <property>
+    <name>rrdcached_timeout</name>
+    <value>3600</value>
+    <description>(-w) Data is written to disk every timeout seconds. If this option is not specified the default interval of 300 seconds will be used.</description>
+  </property>
+  <property>
+    <name>rrdcached_flush_timeout</name>
+    <value>7200</value>
+    <description>(-f) Every timeout seconds the entire cache is searched for old values which are written to disk. This only concerns files to which updates have stopped, so setting this to a high value, such as 3600 seconds, is acceptable in most cases. This timeout defaults to 3600 seconds.</description>
+  </property>
+  <property>
+    <name>rrdcached_delay</name>
+    <value>1800</value>
+    <description>(-z) If specified, rrdcached will delay writing of each RRD for a random number of seconds in the range [0,delay). This will avoid too many writes being queued simultaneously. This value should be no greater than the value specified in -w. By default, there is no delay.</description>
+  </property>
+  <property>
+    <name>rrdcached_write_threads</name>
+    <value>4</value>
+    <description>(-t) Specifies the number of threads used for writing RRD files. The default is 4. Increasing this number will allow rrdcached to have more simultaneous I/O requests into the kernel. This may allow the kernel to re-order disk writes, resulting in better disk throughput.</description>
+  </property>
+  <property>
+    <name>additional_clusters</name>
+    <value> </value>
+    <description>Add additional desired Ganglia metrics clusters in the form "name1:port1,name2:port2". Ensure that the names and ports are unique across all clusters and that the ports are available on the Ganglia server host. Ambari has reserved ports 8667-8669 within its own pool.</description>
+  </property>
+
+</configuration>
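
A note on the additional_clusters value above: it is a comma-separated list of
name:port pairs. As a rough illustration only (the cluster names and ports below
are invented, and this helper is not part of the patch), the expected format can
be parsed like this:

def parse_additional_clusters(value):
  # Split 'name1:port1,name2:port2' into a list of (name, port) tuples.
  clusters = []
  for entry in value.split(','):
    entry = entry.strip()
    if not entry:
      continue
    name, port = entry.rsplit(':', 1)
    clusters.append((name, int(port)))
  return clusters

print(parse_additional_clusters('HDPSlaves2:8670,SparkNodes:8671'))
# -> [('HDPSlaves2', 8670), ('SparkNodes', 8671)]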

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/metainfo.xml b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/metainfo.xml
new file mode 100644
index 0000000..4e96ade
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/GANGLIA/3.5.0/metainfo.xml
@@ -0,0 +1,127 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>GANGLIA</name>
+      <displayName>Ganglia</displayName>
+      <comment>Ganglia Metrics Collection system (&lt;a href=&quot;http://oss.oetiker.ch/rrdtool/&quot; target=&quot;_blank&quot;&gt;RRDTool&lt;/a&gt; will be installed too)</comment>
+      <version>3.5.0</version>
+      <components>
+        <component>
+          <name>GANGLIA_SERVER</name>
+          <displayName>Ganglia Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <commandScript>
+            <script>scripts/ganglia_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>GANGLIA_MONITOR</name>
+          <displayName>Ganglia Monitor</displayName>
+          <category>SLAVE</category>
+          <cardinality>ALL</cardinality>
+          <auto-deploy>
+            <enabled>true</enabled>
+          </auto-deploy>
+          <commandScript>
+            <script>scripts/ganglia_monitor.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>python-rrdtool-1.4.5</name>
+            </package>
+            <package>
+              <name>libganglia-3.5.0-99</name>
+            </package>
+            <package>
+              <name>ganglia-devel-3.5.0-99</name>
+            </package>
+            <package>
+              <name>ganglia-gmetad-3.5.0-99</name>
+            </package>
+            <package>
+              <name>ganglia-web-3.5.7-99.noarch</name>
+            </package>
+            <package>
+              <name>ganglia-gmond-3.5.0-99</name>
+            </package>
+            <package>
+              <name>ganglia-gmond-modules-python-3.5.0-99</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>python-rrdtool</name>
+            </package>
+            <package>
+              <name>gmetad</name>
+            </package>
+            <package>
+              <name>ganglia-webfrontend</name>
+            </package>
+            <package>
+              <name>ganglia-monitor-python</name>
+            </package>
+            <package>
+              <name>rrdcached</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>suse11</osFamily>
+          <packages>
+            <package>
+              <name>apache2</name>
+            </package>
+            <package>
+              <name>apache2?mod_php*</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat5,redhat6</osFamily>
+          <packages>
+            <package>
+              <name>httpd</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <configuration-dependencies>
+        <config-type>ganglia-env</config-type>
+      </configuration-dependencies>
+      <monitoringService>true</monitoringService>
+    </service>
+  </services>
+</metainfo>


[09/37] ambari git commit: AMBARI-8695: Common Services: Refactor HDP-2.0.6 HDFS, ZOOKEEPER services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/balancer-emulator/balancer.log
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/balancer-emulator/balancer.log b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/balancer-emulator/balancer.log
new file mode 100644
index 0000000..2010c02
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/balancer-emulator/balancer.log
@@ -0,0 +1,29 @@
+Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved
+Jul 28, 2014 5:01:49 PM           0                  0 B             5.74 GB            9.79 GB
+Jul 28, 2014 5:03:00 PM           1                  0 B             5.58 GB            9.79 GB
+Jul 28, 2014 5:04:07 PM           2                  0 B             5.40 GB            9.79 GB
+Jul 28, 2014 5:05:14 PM           3                  0 B             5.06 GB            9.79 GB
+Jul 28, 2014 5:05:50 PM           4                  0 B             5.06 GB            9.79 GB
+Jul 28, 2014 5:06:56 PM           5                  0 B             4.81 GB            9.79 GB
+Jul 28, 2014 5:07:33 PM           6                  0 B             4.80 GB            9.79 GB
+Jul 28, 2014 5:09:11 PM           7                  0 B             4.29 GB            9.79 GB
+Jul 28, 2014 5:09:47 PM           8                  0 B             4.29 GB            9.79 GB
+Jul 28, 2014 5:11:24 PM           9                  0 B             3.89 GB            9.79 GB
+Jul 28, 2014 5:12:00 PM          10                  0 B             3.86 GB            9.79 GB
+Jul 28, 2014 5:13:37 PM          11                  0 B             3.23 GB            9.79 GB
+Jul 28, 2014 5:15:13 PM          12                  0 B             2.53 GB            9.79 GB
+Jul 28, 2014 5:15:49 PM          13                  0 B             2.52 GB            9.79 GB
+Jul 28, 2014 5:16:25 PM          14                  0 B             2.51 GB            9.79 GB
+Jul 28, 2014 5:17:01 PM          15                  0 B             2.39 GB            9.79 GB
+Jul 28, 2014 5:17:37 PM          16                  0 B             2.38 GB            9.79 GB
+Jul 28, 2014 5:18:14 PM          17                  0 B             2.31 GB            9.79 GB
+Jul 28, 2014 5:18:50 PM          18                  0 B             2.30 GB            9.79 GB
+Jul 28, 2014 5:19:26 PM          19                  0 B             2.21 GB            9.79 GB
+Jul 28, 2014 5:20:02 PM          20                  0 B             2.10 GB            9.79 GB
+Jul 28, 2014 5:20:38 PM          21                  0 B             2.06 GB            9.79 GB
+Jul 28, 2014 5:22:14 PM          22                  0 B             1.68 GB            9.79 GB
+Jul 28, 2014 5:23:20 PM          23                  0 B             1.00 GB            9.79 GB
+Jul 28, 2014 5:23:56 PM          24                  0 B          1016.16 MB            9.79 GB
+Jul 28, 2014 5:25:33 PM          25                  0 B            30.55 MB            9.79 GB
+The cluster is balanced. Exiting...
+Balancing took 24.858033333333335 minutes

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/balancer-emulator/hdfs-command.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/balancer-emulator/hdfs-command.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/balancer-emulator/hdfs-command.py
new file mode 100644
index 0000000..0cce48c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/balancer-emulator/hdfs-command.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+import time
+import sys
+from threading import Thread
+
+
+def write_function(path, handle, interval):
+  """Replay each line of the given log file onto the handle, sleeping
+  between lines to emulate a live balancer run."""
+  with open(path) as f:
+    for line in f:
+      handle.write(line)
+      handle.flush()
+      time.sleep(interval)
+
+thread = Thread(target=write_function, args=('balancer.log', sys.stdout, 1.5))
+thread.start()
+
+threaderr = Thread(target=write_function, args=('balancer-err.log', sys.stderr, 1.5 * 0.023))
+threaderr.start()
+
+thread.join()
+threaderr.join()
+
+
+def rebalancer_out():
+  write_function('balancer.log', sys.stdout, 1.5)
+
+def rebalancer_err():
+  write_function('balancer-err.log', sys.stderr, 1.5 * 0.023)
\ No newline at end of file
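
As context for reviewers: NameNode.rebalancehdfs (later in this patch) launches
this emulator when the balancer threshold is set to 'DEBUG'. A hedged sketch of
an equivalent stand-alone invocation, assuming Python 2 (matching the scripts in
this patch) and a working directory containing balancer-emulator/:

import subprocess
import sys

# Run the emulator the same way NameNode.rebalancehdfs does in its DEBUG
# branch: a shell command executed inside balancer-emulator/.
proc = subprocess.Popen('python hdfs-command.py',
                        stdout=subprocess.PIPE,
                        shell=True,
                        cwd='balancer-emulator')
while True:
  line = proc.stdout.readline()
  if not line:
    break
  sys.stdout.write(line)
proc.wait()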

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
new file mode 100644
index 0000000..4afd4ce
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
@@ -0,0 +1,88 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import datanode_upgrade
+from hdfs_datanode import datanode
+from resource_management import *
+from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from hdfs import hdfs
+
+
+class DataNode(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env, params.exclude_packages)
+    env.set_params(params)
+
+
+  def pre_rolling_restart(self, env):
+    Logger.info("Executing DataNode Rolling Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+      Execute(format("hdp-select set hadoop-hdfs-datanode {version}"))
+
+
+  def post_rolling_restart(self, env):
+    Logger.info("Executing DataNode Rolling Upgrade post-restart")
+    import params
+    env.set_params(params)
+
+    # ensure the DataNode has started and rejoined the cluster
+    datanode_upgrade.post_upgrade_check()
+
+
+  def start(self, env, rolling_restart=False):
+    import params
+
+    env.set_params(params)
+    self.configure(env)
+    datanode(action="start")
+
+
+  def stop(self, env, rolling_restart=False):
+    import params
+
+    env.set_params(params)
+
+    # pre-upgrade steps shutdown the datanode, so there's no need to call
+    # action=stop
+    if rolling_restart:
+      datanode_upgrade.pre_upgrade_shutdown()
+    else:
+      datanode(action="stop")
+
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hdfs()
+    datanode(action="configure")
+
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.datanode_pid_file)
+
+
+if __name__ == "__main__":
+  DataNode().execute()
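
The pre_rolling_restart gate above only switches hadoop-hdfs-datanode via
hdp-select on stacks at or above 2.2.0.0. A minimal stand-alone sketch of that
comparison (not the resource_management implementation, which also normalizes
the version string):

def _to_tuple(version):
  # '2.2.0.0' -> (2, 2, 0, 0)
  return tuple(int(part) for part in version.split('.'))

def needs_hdp_select(version, minimum='2.2.0.0'):
  return _to_tuple(version) >= _to_tuple(minimum)

assert needs_hdp_select('2.2.1.0')
assert not needs_hdp_select('2.0.6.0')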

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode_upgrade.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode_upgrade.py
new file mode 100644
index 0000000..88af1f9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode_upgrade.py
@@ -0,0 +1,114 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.system import Execute
+from resource_management.core.shell import call
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.decorator import retry
+
+
+def pre_upgrade_shutdown():
+  """
+  Runs the "shutdownDatanode {ipc_address} upgrade" command to shutdown the
+  DataNode in preparation for an upgrade. This will then periodically check
+  "getDatanodeInfo" to ensure the DataNode has shutdown correctly.
+  This function will obtain the Kerberos ticket if security is enabled.
+  :return:
+  """
+  import params
+
+  Logger.info('DataNode executing "shutdownDatanode" command in preparation for upgrade...')
+  if params.security_enabled:
+    Execute(params.dn_kinit_cmd, user=params.hdfs_user)
+
+  command = format('hdfs dfsadmin -shutdownDatanode {dfs_dn_ipc_address} upgrade')
+  Execute(command, user=params.hdfs_user, tries=1)
+
+  # verify that the datanode is down
+  _check_datanode_shutdown()
+
+
+def post_upgrade_check():
+  """
+  Verifies that the DataNode has rejoined the cluster. This function will
+  obtain the Kerberos ticket if security is enabled.
+  :return:
+  """
+  import params
+
+  Logger.info("Checking that the DataNode has rejoined the cluster after upgrade...")
+  if params.security_enabled:
+    Execute(params.dn_kinit_cmd, user=params.hdfs_user)
+
+  # verify that the datanode has started and rejoined the HDFS cluster
+  _check_datanode_startup()
+
+
+@retry(times=12, sleep_time=10, err_class=Fail)
+def _check_datanode_shutdown():
+  """
+  Checks that a DataNode is down by running "hdfs dfsadmin -getDatanodeInfo"
+  several times, pausing in between runs. Once the DataNode stops responding
+  this method will return, otherwise it will raise a Fail(...) and retry
+  automatically.
+  :return:
+  """
+  import params
+
+  command = format('hdfs dfsadmin -getDatanodeInfo {dfs_dn_ipc_address}')
+
+  try:
+    Execute(command, user=params.hdfs_user, tries=1)
+  except:
+    Logger.info("DataNode has successfully shutdown for upgrade.")
+    return
+
+  Logger.info("DataNode has not shutdown.")
+  raise Fail('DataNode has not shutdown.')
+
+
+@retry(times=12, sleep_time=10, err_class=Fail)
+def _check_datanode_startup():
+  """
+  Checks that a DataNode is reported as being alive via the
+  "hdfs dfsadmin -report -live" command. Once the DataNode is found to be
+  alive this method will return, otherwise it will raise a Fail(...) and retry
+  automatically.
+  :return:
+  """
+  import params
+
+  try:
+    # 'su - hdfs -c "hdfs dfsadmin -report -live"'
+    command = 'hdfs dfsadmin -report -live'
+    return_code, hdfs_output = call(command, user=params.hdfs_user)
+  except:
+    raise Fail('Unable to determine if the DataNode has started after upgrade.')
+
+  if return_code == 0:
+    if params.hostname.lower() in hdfs_output.lower():
+      Logger.info("DataNode {0} reports that it has rejoined the cluster.".format(params.hostname))
+      return
+    else:
+      raise Fail("DataNode {0} was not found in the list of live DataNodes".format(params.hostname))
+
+  # return_code is not 0, fail
+  raise Fail("Unable to determine if the DataNode has started after upgrade (result code {0})".format(str(return_code)))

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
new file mode 100644
index 0000000..25c1067
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
@@ -0,0 +1,83 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+import sys
+import os
+
+
+def hdfs(name=None):
+  import params
+
+  # On some OSes this directory may not exist, so create it before placing files in it
+  Directory(params.limits_conf_dir,
+            recursive=True,
+            owner='root',
+            group='root'
+  )
+
+  File(os.path.join(params.limits_conf_dir, 'hdfs.conf'),
+       owner='root',
+       group='root',
+       mode=0644,
+       content=Template("hdfs.conf.j2")
+  )
+
+  if params.security_enabled:
+    tc_mode = 0644
+    tc_owner = "root"
+  else:
+    tc_mode = None
+    tc_owner = params.hdfs_user
+
+  if "hadoop-policy" in params.config['configurations']:
+    XmlConfig("hadoop-policy.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['hadoop-policy'],
+              configuration_attributes=params.config['configuration_attributes']['hadoop-policy'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+  XmlConfig("hdfs-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['hdfs-site'],
+            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+            owner=params.hdfs_user,
+            group=params.user_group
+  )
+
+  XmlConfig("core-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['core-site'],
+            configuration_attributes=params.config['configuration_attributes']['core-site'],
+            owner=params.hdfs_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  File(os.path.join(params.hadoop_conf_dir, 'slaves'),
+       owner=tc_owner,
+       content=Template("slaves.j2")
+  )
+  
+  if params.lzo_enabled:
+    Package(params.lzo_packages_for_current_host)
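
XmlConfig above renders each configurations dict into a Hadoop *-site.xml. A
rough sketch of the output shape (illustration only; the real
resource_management implementation also handles configuration_attributes,
ownership, and file mode):

from xml.sax.saxutils import escape

def render_site_xml(configurations):
  lines = ['<configuration>']
  for name, value in sorted(configurations.items()):
    lines.append('  <property>')
    lines.append('    <name>%s</name>' % escape(name))
    lines.append('    <value>%s</value>' % escape(str(value)))
    lines.append('  </property>')
  lines.append('</configuration>')
  return '\n'.join(lines)

print(render_site_xml({'dfs.replication': 3}))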

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
new file mode 100644
index 0000000..b9f244a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
@@ -0,0 +1,53 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hdfs import hdfs
+from utils import service
+
+
+class HdfsClient(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env, params.exclude_packages)
+    env.set_params(params)
+    self.config(env)
+
+  def start(self, env, rolling_restart=False):
+    import params
+
+    env.set_params(params)
+
+  def stop(self, env, rolling_restart=False):
+    import params
+
+    env.set_params(params)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+  def config(self, env):
+    import params
+    hdfs()
+    pass
+
+
+if __name__ == "__main__":
+  HdfsClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_datanode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_datanode.py
new file mode 100644
index 0000000..d432d88
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_datanode.py
@@ -0,0 +1,62 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions.dfs_datanode_helper import handle_dfs_data_dir
+from utils import service
+
+
+def create_dirs(data_dir, params):
+  """
+  :param data_dir: The directory to create
+  :param params: parameters
+  """
+  Directory(data_dir,
+            recursive=True,
+            recursive_permission=True,
+            mode=0755,
+            owner=params.hdfs_user,
+            group=params.user_group,
+            ignore_failures=True
+  )
+
+
+def datanode(action=None):
+  import params
+  if action == "configure":
+    Directory(params.dfs_domain_socket_dir,
+              recursive=True,
+              mode=0751,
+              owner=params.hdfs_user,
+              group=params.user_group)
+
+    handle_dfs_data_dir(create_dirs, params)
+
+  elif action == "start" or action == "stop":
+    Directory(params.hadoop_pid_dir_prefix,
+              mode=0755,
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+    service(
+      action=action, name="datanode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True
+    )
\ No newline at end of file
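
handle_dfs_data_dir (imported from resource_management) supplies the iteration
over the configured data directories; create_dirs above is just its
per-directory callback. A hedged sketch of the assumed contract, with the
directory value passed in explicitly for clarity:

def handle_dfs_data_dir_sketch(callback, data_dir_value, params):
  # Assumed behavior: walk the comma-separated dfs.datanode.data.dir
  # value and invoke the callback once per non-empty entry.
  for data_dir in data_dir_value.split(','):
    data_dir = data_dir.strip()
    if data_dir:
      callback(data_dir, params)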

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
new file mode 100644
index 0000000..9f470a3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
@@ -0,0 +1,202 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.core.exceptions import ComponentIsNotRunning
+
+from utils import service, safe_zkfc_op
+
+
+def namenode(action=None, do_format=True, rolling_restart=False, env=None):
+  import params
+  # This directory must exist before any action runs (it is required by the
+  # HA manual steps for the additional NameNode).
+  if action == "configure":
+    create_name_dirs(params.dfs_name_dir)
+
+  if action == "start":
+    if do_format:
+      format_namenode()
+      pass
+
+    File(params.exclude_file_path,
+         content=Template("exclude_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+    )
+
+    Directory(params.hadoop_pid_dir_prefix,
+              mode=0755,
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+    options = "-rollingUpgrade started" if rolling_restart else ""
+
+    service(
+      action="start",
+      name="namenode",
+      user=params.hdfs_user,
+      options=options,
+      create_pid_dir=True,
+      create_log_dir=True
+    )
+
+    if rolling_restart:    
+      # Must start Zookeeper Failover Controller if it exists on this host because it could have been killed in order to initiate the failover.
+      safe_zkfc_op(action, env)
+
+    if params.security_enabled:
+      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
+              user = params.hdfs_user)
+
+    if params.dfs_ha_enabled:
+      dfs_check_nn_status_cmd = as_user(format("hdfs --config {hadoop_conf_dir} haadmin -getServiceState {namenode_id} | grep active"), params.hdfs_user, env={'PATH':params.hadoop_bin_dir})
+    else:
+      dfs_check_nn_status_cmd = None
+
+    namenode_safe_mode_off = format("hadoop dfsadmin -safemode get | grep 'Safe mode is OFF'")
+
+    # If HA is enabled and it is in standby, then stay in safemode, otherwise, leave safemode.
+    leave_safe_mode = True
+    if dfs_check_nn_status_cmd is not None:
+      code, out = shell.call(dfs_check_nn_status_cmd) # If active NN, code will be 0
+      if code != 0:
+        leave_safe_mode = False
+
+    if leave_safe_mode:
+      # If the NameNode does not report 'Safe mode is OFF' (i.e. safemode is ON), explicitly leave safemode
+      code, out = shell.call(namenode_safe_mode_off)
+      if code != 0:
+        leave_safe_mode_cmd = format("hdfs --config {hadoop_conf_dir} dfsadmin -safemode leave")
+        Execute(leave_safe_mode_cmd,
+                user=params.hdfs_user,
+                path=[params.hadoop_bin_dir],
+        )
+
+    # Verify if Namenode should be in safemode OFF
+    Execute(namenode_safe_mode_off,
+            tries=40,
+            try_sleep=10,
+            path=[params.hadoop_bin_dir],
+            user=params.hdfs_user,
+            only_if=dfs_check_nn_status_cmd #skip when HA not active
+    )
+    create_hdfs_directories(dfs_check_nn_status_cmd)
+
+  if action == "stop":
+    service(
+      action="stop", name="namenode", 
+      user=params.hdfs_user
+    )
+
+  if action == "decommission":
+    decommission()
+
+def create_name_dirs(directories):
+  import params
+
+  dirs = directories.split(",")
+  Directory(dirs,
+            mode=0755,
+            owner=params.hdfs_user,
+            group=params.user_group,
+            recursive=True,
+            recursive_permission=True
+  )
+
+
+def create_hdfs_directories(check):
+  import params
+
+  params.HdfsDirectory("/tmp",
+                       action="create_delayed",
+                       owner=params.hdfs_user,
+                       mode=0777
+  )
+  params.HdfsDirectory(params.smoke_hdfs_user_dir,
+                       action="create_delayed",
+                       owner=params.smoke_user,
+                       mode=params.smoke_hdfs_user_mode
+  )
+  params.HdfsDirectory(None, action="create",
+                       only_if=check #skip creation when HA not active
+  )
+
+def format_namenode(force=None):
+  import params
+
+  old_mark_dir = params.namenode_formatted_old_mark_dir
+  mark_dir = params.namenode_formatted_mark_dir
+  dfs_name_dir = params.dfs_name_dir
+  hdfs_user = params.hdfs_user
+  hadoop_conf_dir = params.hadoop_conf_dir
+
+  if not params.dfs_ha_enabled:
+    if force:
+      ExecuteHadoop('namenode -format',
+                    kinit_override=True,
+                    bin_dir=params.hadoop_bin_dir,
+                    conf_dir=hadoop_conf_dir)
+    else:
+      File(format("{tmp_dir}/checkForFormat.sh"),
+           content=StaticFile("checkForFormat.sh"),
+           mode=0755)
+      Execute(format(
+        "{tmp_dir}/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} "
+        "{hadoop_bin_dir} {old_mark_dir} {mark_dir} {dfs_name_dir}"),
+              not_if=format("test -d {old_mark_dir} || test -d {mark_dir}"),
+              path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin"
+      )
+    
+      Directory(mark_dir,
+        recursive = True
+      )
+
+
+def decommission():
+  import params
+
+  hdfs_user = params.hdfs_user
+  conf_dir = params.hadoop_conf_dir
+  user_group = params.user_group
+  nn_kinit_cmd = params.nn_kinit_cmd
+  
+  File(params.exclude_file_path,
+       content=Template("exclude_hosts_list.j2"),
+       owner=hdfs_user,
+       group=user_group
+  )
+  
+  if not params.update_exclude_file_only:
+    Execute(nn_kinit_cmd,
+            user=hdfs_user
+    )
+
+    if params.dfs_ha_enabled:
+      # due to a bug in hdfs, refreshNodes will not run on both namenodes so we
+      # need to execute each command scoped to a particular namenode
+      nn_refresh_cmd = format('dfsadmin -fs hdfs://{namenode_rpc} -refreshNodes')
+    else:
+      nn_refresh_cmd = format('dfsadmin -refreshNodes')
+    ExecuteHadoop(nn_refresh_cmd,
+                  user=hdfs_user,
+                  conf_dir=conf_dir,
+                  kinit_override=True,
+                  bin_dir=params.hadoop_bin_dir)
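
The safemode gate in namenode() above polls 'hdfs dfsadmin -safemode get' up to
40 times with a 10-second pause. A stand-alone sketch of the same wait loop
(assumes the hdfs binary is on PATH and the command exits 0):

import subprocess
import time

def wait_for_safemode_off(tries=40, try_sleep=10):
  for _ in range(tries):
    output = subprocess.check_output('hdfs dfsadmin -safemode get', shell=True)
    if 'Safe mode is OFF' in output:
      return True
    time.sleep(try_sleep)
  return False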

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_rebalance.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_rebalance.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_rebalance.py
new file mode 100644
index 0000000..1dc545e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_rebalance.py
@@ -0,0 +1,130 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import re
+
+class HdfsParser():
+  def __init__(self):
+    self.initialLine = None
+    self.state = None
+  
+  def parseLine(self, line):
+    hdfsLine = HdfsLine()
+    type, matcher = hdfsLine.recognizeType(line)
+    if(type == HdfsLine.LineType.HeaderStart):
+      self.state = 'PROCESS_STARTED'
+    elif (type == HdfsLine.LineType.Progress):
+      self.state = 'PROGRESS'
+      hdfsLine.parseProgressLog(line, matcher)
+      if self.initialLine is None: self.initialLine = hdfsLine
+      
+      return hdfsLine 
+    elif (type == HdfsLine.LineType.ProgressEnd):
+      self.state = 'PROCESS_FINISHED'
+    return None
+    
+class HdfsLine():
+  
+  class LineType:
+    HeaderStart, Progress, ProgressEnd, Unknown = range(4)
+  
+  
+  MEMORY_SUFFIX = ['B','KB','MB','GB','TB','PB','EB']
+  MEMORY_PATTERN = '(?P<memmult_%d>(?P<memory_%d>(\d+)(.|,)?(\d+)?) (?P<mult_%d>'+"|".join(MEMORY_SUFFIX)+'))'
+  
+  HEADER_BEGIN_PATTERN = re.compile('Time Stamp\s+Iteration#\s+Bytes Already Moved\s+Bytes Left To Move\s+Bytes Being Moved')
+  PROGRESS_PATTERN = re.compile(
+                            "(?P<date>.*?)\s+" + 
+                            "(?P<iteration>\d+)\s+" + 
+                            MEMORY_PATTERN % (1,1,1) + "\s+" + 
+                            MEMORY_PATTERN % (2,2,2) + "\s+" +
+                            MEMORY_PATTERN % (3,3,3)
+                            )
+  PROGRESS_END_PATTERN = re.compile('The cluster is balanced\. Exiting\.\.\.')
+  
+  def __init__(self):
+    self.date = None
+    self.iteration = None
+    self.bytesAlreadyMoved = None 
+    self.bytesLeftToMove = None
+    self.bytesBeingMoved = None 
+    self.bytesAlreadyMovedStr = None 
+    self.bytesLeftToMoveStr = None
+    self.bytesBeingMovedStr = None 
+  
+  def recognizeType(self, line):
+    for (type, pattern) in (
+                            (HdfsLine.LineType.HeaderStart, self.HEADER_BEGIN_PATTERN),
+                            (HdfsLine.LineType.Progress, self.PROGRESS_PATTERN), 
+                            (HdfsLine.LineType.ProgressEnd, self.PROGRESS_END_PATTERN)
+                            ):
+      m = re.match(pattern, line)
+      if m:
+        return type, m
+    return HdfsLine.LineType.Unknown, None
+    
+  def parseProgressLog(self, line, m):
+    '''
+    Parse the line of 'hdfs rebalancer' output. The example output being parsed:
+    
+    Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved
+    Jul 28, 2014 5:01:49 PM           0                  0 B             5.74 GB            9.79 GB
+    Jul 28, 2014 5:03:00 PM           1                  0 B             5.58 GB            9.79 GB
+    
+    Throws AmbariException in case of parsing errors
+
+    '''
+    m = re.match(self.PROGRESS_PATTERN, line)
+    if m:
+      self.date = m.group('date') 
+      self.iteration = int(m.group('iteration'))
+       
+      self.bytesAlreadyMoved = self.parseMemory(m.group('memory_1'), m.group('mult_1')) 
+      self.bytesLeftToMove = self.parseMemory(m.group('memory_2'), m.group('mult_2')) 
+      self.bytesBeingMoved = self.parseMemory(m.group('memory_3'), m.group('mult_3'))
+       
+      self.bytesAlreadyMovedStr = m.group('memmult_1') 
+      self.bytesLeftToMoveStr = m.group('memmult_2')
+      self.bytesBeingMovedStr = m.group('memmult_3') 
+    else:
+      raise AmbariException("Failed to parse line [%s]") 
+  
+  def parseMemory(self, memorySize, multiplier_type):
+    try:
+      factor = self.MEMORY_SUFFIX.index(multiplier_type)
+    except ValueError:
+      raise AmbariException("Failed to memory value [%s %s]" % (memorySize, multiplier_type))
+    
+    return float(memorySize) * (1024 ** factor)
+  def toJson(self):
+    return {
+            'timeStamp' : self.date,
+            'iteration' : self.iteration,
+            
+            'dataMoved': self.bytesAlreadyMovedStr,
+            'dataLeft' : self.bytesLeftToMoveStr,
+            'dataBeingMoved': self.bytesBeingMovedStr,
+            
+            'bytesMoved': self.bytesAlreadyMoved,
+            'bytesLeft' : self.bytesLeftToMove,
+            'bytesBeingMoved': self.bytesBeingMoved,
+          }
+  def __str__(self):
+    return "[ date=%s,iteration=%d, bytesAlreadyMoved=%d, bytesLeftToMove=%d, bytesBeingMoved=%d]"%(self.date, self.iteration, self.bytesAlreadyMoved, self.bytesLeftToMove, self.bytesBeingMoved)
\ No newline at end of file
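
A quick usage sketch for the parser, assuming it is run from the scripts
directory so that `from hdfs_rebalance import HdfsParser` resolves, feeding it
lines from the sample balancer.log added earlier in this patch:

from hdfs_rebalance import HdfsParser

parser = HdfsParser()
# The header line only advances the state machine; it returns None.
parser.parseLine('Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved')
# A progress line returns a populated HdfsLine.
progress = parser.parseLine('Jul 28, 2014 5:01:49 PM           0                  0 B             5.74 GB            9.79 GB')
print(progress.toJson())
# -> {'timeStamp': 'Jul 28, 2014 5:01:49 PM', 'iteration': 0, 'dataLeft': '5.74 GB', ...}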

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_snamenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_snamenode.py
new file mode 100644
index 0000000..c650c4d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_snamenode.py
@@ -0,0 +1,51 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+from utils import hdfs_directory
+
+
+def snamenode(action=None, format=False):
+  import params
+
+  if action == "configure":
+    Directory(params.fs_checkpoint_dir,
+              recursive=True,
+              recursive_permission=True,
+              mode=0755,
+              owner=params.hdfs_user,
+              group=params.user_group)
+    File(params.exclude_file_path,
+         content=Template("exclude_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group)
+  elif action == "start" or action == "stop":
+    Directory(params.hadoop_pid_dir_prefix,
+              mode=0755,
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+    service(
+      action=action,
+      name="secondarynamenode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True
+    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
new file mode 100644
index 0000000..f664bcd
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
@@ -0,0 +1,90 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.format import format
+
+from utils import service
+from hdfs import hdfs
+
+
+class JournalNode(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env, params.exclude_packages)
+    env.set_params(params)
+
+  def pre_rolling_restart(self, env):
+    Logger.info("Executing Rolling Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+      Execute(format("hdp-select set hadoop-hdfs-journalnode {version}"))
+
+  def start(self, env, rolling_restart=False):
+    import params
+
+    env.set_params(params)
+    self.configure(env)
+    Directory(params.hadoop_pid_dir_prefix,
+              mode=0755,
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+    service(
+      action="start", name="journalnode", user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True
+    )
+
+  def stop(self, env, rolling_restart=False):
+    import params
+
+    env.set_params(params)
+    service(
+      action="stop", name="journalnode", user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True
+    )
+
+  def configure(self, env):
+    import params
+
+    Directory(params.jn_edits_dir,
+              recursive=True,
+              recursive_permission=True,
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+    env.set_params(params)
+    hdfs()
+    pass
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.journalnode_pid_file)
+
+
+if __name__ == "__main__":
+  JournalNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
new file mode 100644
index 0000000..c8a460f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
@@ -0,0 +1,162 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+import os
+import json
+import subprocess
+from datetime import datetime
+
+from resource_management import *
+from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.check_process_status import check_process_status
+
+from hdfs_namenode import namenode
+from hdfs import hdfs
+import hdfs_rebalance
+from utils import failover_namenode
+
+
+class NameNode(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env, params.exclude_packages)
+    env.set_params(params)
+    #TODO we need this for HA because of manual steps
+    self.configure(env)
+
+  def pre_rolling_restart(self, env):
+    Logger.info("Executing Rolling Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+      Execute(format("hdp-select set hadoop-hdfs-namenode {version}"))
+
+  def start(self, env, rolling_restart=False):
+    import params
+
+    env.set_params(params)
+    self.configure(env)
+    namenode(action="start", rolling_restart=rolling_restart, env=env)
+
+  def post_rolling_restart(self, env):
+    Logger.info("Executing Rolling Upgrade post-restart")
+    import params
+    env.set_params(params)
+
+    Execute("hdfs dfsadmin -report -live",
+            user=params.hdfs_principal_name if params.security_enabled else params.hdfs_user
+    )
+
+  def stop(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+
+    if rolling_restart and params.dfs_ha_enabled:
+      if params.dfs_ha_automatic_failover_enabled:
+        failover_namenode()
+      else:
+        raise Fail("Rolling Upgrade - dfs.ha.automatic-failover.enabled must be enabled to perform a rolling restart")
+
+    namenode(action="stop", rolling_restart=rolling_restart, env=env)
+
+  def configure(self, env):
+    import params
+
+    env.set_params(params)
+    hdfs()
+    namenode(action="configure", env=env)
+    pass
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.namenode_pid_file)
+    pass
+
+  def decommission(self, env):
+    import params
+
+    env.set_params(params)
+    namenode(action="decommission")
+    pass
+  
+    
+  def rebalancehdfs(self, env):
+    import params
+    env.set_params(params)
+
+    name_node_parameters = json.loads( params.name_node_params )
+    threshold = name_node_parameters['threshold']
+    _print("Starting balancer with threshold = %s\n" % threshold)
+    
+    def calculateCompletePercent(first, current):
+      return 1.0 - current.bytesLeftToMove/first.bytesLeftToMove
+    
+    
+    def startRebalancingProcess(threshold):
+      rebalanceCommand = format('hdfs --config {hadoop_conf_dir} balancer -threshold {threshold}')
+      return as_user(rebalanceCommand, params.hdfs_user, env={'PATH': params.hadoop_bin_dir})
+    
+    command = startRebalancingProcess(threshold)
+    
+    basedir = os.path.join(env.config.basedir, 'scripts')
+    if threshold == 'DEBUG': #FIXME TODO remove this on PROD
+      basedir = os.path.join(env.config.basedir, 'scripts', 'balancer-emulator')
+      command = 'python hdfs-command.py' # a plain string, since Popen below runs with shell=True
+    
+    _print("Executing command %s\n" % command)
+    
+    parser = hdfs_rebalance.HdfsParser()
+    proc = subprocess.Popen(
+                            command, 
+                            stdout=subprocess.PIPE,
+                            stderr=subprocess.PIPE,
+                            shell=True,
+                            close_fds=True,
+                            cwd=basedir
+                           )
+    for line in iter(proc.stdout.readline, ''):
+      _print('[balancer] %s %s' % (str(datetime.now()), line ))
+      pl = parser.parseLine(line)
+      if pl:
+        res = pl.toJson()
+        res['completePercent'] = calculateCompletePercent(parser.initialLine, pl) 
+        
+        self.put_structured_out(res)
+      elif parser.state == 'PROCESS_FINISHED':
+        _print('[balancer] %s %s' % (str(datetime.now()), 'Process is finished' ))
+        self.put_structured_out({'completePercent' : 1})
+        break
+    
+    proc.stdout.close()
+    proc.wait()
+    if proc.returncode is not None and proc.returncode != 0:
+      raise Fail('Hdfs rebalance process exited with error. See the log output')
+      
+def _print(line):
+  sys.stdout.write(line)
+  sys.stdout.flush()
+
+if __name__ == "__main__":
+  NameNode().execute()
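
calculateCompletePercent above turns the balancer's 'Bytes Left To Move'
figures into a progress fraction. A worked example using two values from the
sample balancer.log earlier in this patch:

first_left = 5.74    # GB left to move at iteration 0
current_left = 2.53  # GB left to move at iteration 12
print(1.0 - current_left / first_left)  # -> ~0.559, i.e. roughly 56% complete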

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
new file mode 100644
index 0000000..12353de
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
@@ -0,0 +1,289 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management import *
+import status_params
+import utils
+import os
+import itertools
+import re
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+hdfs_user = status_params.hdfs_user
+hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
+
+# Some datanode settings
+dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
+dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
+dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
+dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
+dfs_dn_ipc_address = config['configurations']['hdfs-site']['dfs.datanode.ipc.address']
+secure_dn_ports_are_in_use = False
+
+#hadoop params
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+  mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
+  hadoop_libexec_dir = "/usr/hdp/current/hadoop-client/libexec"
+  hadoop_bin = "/usr/hdp/current/hadoop-client/sbin"    # TODO Rolling Upgrade, switch from hadoop-client to server when starting daemon.
+  hadoop_bin_dir = "/usr/hdp/current/hadoop-client/bin"
+  hadoop_home = "/usr/hdp/current/hadoop-client"
+  if not security_enabled:
+    hadoop_secure_dn_user = '""'
+  else:
+    dfs_dn_port = utils.get_port(dfs_dn_addr)
+    dfs_dn_http_port = utils.get_port(dfs_dn_http_addr)
+    dfs_dn_https_port = utils.get_port(dfs_dn_https_addr)
+    # We try to avoid inability to start datanode as a plain user due to usage of root-owned ports
+    if dfs_http_policy == "HTTPS_ONLY":
+      secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_https_port)
+    elif dfs_http_policy == "HTTP_AND_HTTPS":
+      secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_http_port) or utils.is_secure_port(dfs_dn_https_port)
+    else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
+      secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_http_port)
+    if secure_dn_ports_are_in_use:
+      hadoop_secure_dn_user = hdfs_user
+    else:
+      hadoop_secure_dn_user = '""'
+else:
+  mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+  hadoop_bin = "/usr/lib/hadoop/sbin"
+  hadoop_bin_dir = "/usr/bin"
+  hadoop_home = "/usr/lib/hadoop"
+  hadoop_secure_dn_user = hdfs_user
+
+hadoop_conf_dir = "/etc/hadoop/conf"
+hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
+limits_conf_dir = "/etc/security/limits.d"
+
+execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir
+ulimit_cmd = "ulimit -c unlimited && "
+
+#security params
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+falcon_user = config['configurations']['falcon-env']['falcon_user']
+
+#exclude file
+hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
+exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
+update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
+
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+#hosts
+hostname = config["hostname"]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+nm_host = default("/clusterHostInfo/nm_host", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
+zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
+falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
+
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_namenodes = not len(namenode_host) == 0
+has_jobtracker = not len(jtnode_host) == 0
+has_resourcemanager = not len(rm_host) == 0
+has_historyserver = not len(hs_host) == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_slaves = not len(slave_hosts) == 0
+has_oozie_server = not len(oozie_servers)  == 0
+has_hcat_server_host = not len(hcat_server_hosts)  == 0
+has_hive_server_host = not len(hive_server_host)  == 0
+has_journalnode_hosts = not len(journalnode_hosts)  == 0
+has_zkfc_hosts = not len(zkfc_hosts)  == 0
+has_falcon_host = not len(falcon_host)  == 0
+
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+#users and groups
+yarn_user = config['configurations']['yarn-env']['yarn_user']
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+oozie_user = config['configurations']['oozie-env']['oozie_user']
+webhcat_user = config['configurations']['hive-env']['hcat_user']
+hcat_user = config['configurations']['hive-env']['hcat_user']
+hive_user = config['configurations']['hive-env']['hive_user']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+mapred_user = config['configurations']['mapred-env']['mapred_user']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+
+user_group = config['configurations']['cluster-env']['user_group']
+proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
+
+#hadoop params
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
+
+dfs_domain_socket_path = config['configurations']['hdfs-site']['dfs.domain.socket.path']
+dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
+
+jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']
+
+dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
+
+namenode_dirs_created_stub_dir = format("{hdfs_log_dir_prefix}/{hdfs_user}")
+namenode_dirs_stub_filename = "namenode_dirs_created"
+
+smoke_hdfs_user_dir = format("/user/{smoke_user}")
+smoke_hdfs_user_mode = 0770
+
+namenode_formatted_old_mark_dir = format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted/")
+namenode_formatted_mark_dir = format("/var/lib/hdfs/namenode/formatted/")
+
+fs_checkpoint_dir = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir']
+
+dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
+dfs_data_dir = ",".join([re.sub(r'^\[.+\]', '', dfs_dir.strip()) for dfs_dir in dfs_data_dir.split(",")])
+
+data_dir_mount_file = config['configurations']['hadoop-env']['dfs.datanode.data.dir.mount.file']
+
+# HDFS High Availability properties
+dfs_ha_enabled = False
+dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
+dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+dfs_ha_automatic_failover_enabled = default("/configurations/hdfs-site/dfs.ha.automatic-failover.enabled", False)
+
+namenode_id = None
+namenode_rpc = None
+
+if dfs_ha_namenode_ids:
+  dfs_ha_namenode_ids_list = dfs_ha_namenode_ids.split(",")
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namenode_ids_list)
+  if dfs_ha_namenode_ids_array_len > 1:
+    dfs_ha_enabled = True
+if dfs_ha_enabled:
+  for nn_id in dfs_ha_namenode_ids_list:
+    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
+    if hostname in nn_host:
+      namenode_id = nn_id
+      namenode_rpc = nn_host
+
+journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.http-address', None)
+if journalnode_address:
+  journalnode_port = journalnode_address.split(":")[1]
+  
+  
+if security_enabled:
+  _dn_principal_name = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
+  _dn_keytab = config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
+  _dn_principal_name = _dn_principal_name.replace('_HOST',hostname.lower())
+  
+  dn_kinit_cmd = format("{kinit_path_local} -kt {_dn_keytab} {_dn_principal_name};")
+  
+  _nn_principal_name = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
+  _nn_keytab = config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
+  _nn_principal_name = _nn_principal_name.replace('_HOST',hostname.lower())
+  
+  nn_kinit_cmd = format("{kinit_path_local} -kt {_nn_keytab} {_nn_principal_name};")  
+else:
+  dn_kinit_cmd = ""
+  nn_kinit_cmd = ""  
+
+import functools
+# create a partial function with common arguments for every HdfsDirectory call;
+# code that needs an HDFS directory created calls params.HdfsDirectory
+HdfsDirectory = functools.partial(
+  HdfsDirectory,
+  conf_dir=hadoop_conf_dir,
+  hdfs_user=hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  bin_dir = hadoop_bin_dir
+)
+
+io_compression_codecs = config['configurations']['core-site']['io.compression.codecs']
+lzo_enabled = "com.hadoop.compression.lzo" in io_compression_codecs
+# version strings used in HDP 2.2+ LZO package names
+underscored_version = stack_version_unformatted.replace('.', '_')
+dashed_version = stack_version_unformatted.replace('.', '-')
+lzo_packages_to_family = {
+  "any": ["hadoop-lzo"],
+  "redhat": ["lzo", "hadoop-lzo-native"],
+  "suse": ["lzo", "hadoop-lzo-native"],
+  "ubuntu": ["liblzo2-2"]
+}
+
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+  lzo_packages_to_family["redhat"] += [format("hadooplzo_{underscorred_version}_*")]
+  lzo_packages_to_family["suse"] += [format("hadooplzo_{underscorred_version}_*")]
+  lzo_packages_to_family["ubuntu"] += [format("hadooplzo_{dashed_version}_*")]
+
+lzo_packages_for_current_host = lzo_packages_to_family['any'] + lzo_packages_to_family[System.get_instance().os_family]
+all_lzo_packages = set(itertools.chain(*lzo_packages_to_family.values()))
+ 
+exclude_packages = []
+if not lzo_enabled:
+  exclude_packages += all_lzo_packages
+  
+name_node_params = default("/commandParams/namenode", None)
+
+#hadoop params
+hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
+
+#hadoop-env.sh
+java_home = config['hostLevelParams']['java_home']
+
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.0') >= 0 and compare_versions(hdp_stack_version, '2.1') < 0 and System.get_instance().os_family != "suse":
+  # deprecated rhel jsvc_path
+  jsvc_path = "/usr/libexec/bigtop-utils"
+else:
+  jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
+namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")

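For reference, a standalone sketch of the secure-datanode port decision above, re-implementing the two utils.py helpers in plain Python (no Ambari imports; the sample addresses are illustrative only):

import re

def get_port(address):
    # pull the port out of addresses like "0.0.0.0:1019" or "https://host:50475"
    if address is None:
        return None
    m = re.search(r'(?:https?://)?([\w.]*):(\d{1,5})', address)
    return int(m.group(2)) if m else None

def is_secure_port(port):
    # ports below 1024 require root on *nix
    return port is not None and port < 1024

# under dfs.http.policy=HTTPS_ONLY only the data and HTTPS ports are checked:
print(is_secure_port(get_port("0.0.0.0:1019")))   # True  -> datanode must start as root
print(is_secure_port(get_port("0.0.0.0:50475")))  # False -> plain hdfs user suffices
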
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/service_check.py
new file mode 100644
index 0000000..3dc3a1b
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/service_check.py
@@ -0,0 +1,119 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+class HdfsServiceCheck(Script):
+  def service_check(self, env):
+    import params
+
+    env.set_params(params)
+    unique = functions.get_unique_id_and_date()
+    test_dir = '/tmp'
+    tmp_file = format("{test_dir}/{unique}")
+
+    safemode_command = "dfsadmin -safemode get | grep OFF"
+
+    create_dir_cmd = format("fs -mkdir {test_dir}")
+    chmod_command = format("fs -chmod 777 {test_dir}")
+    test_dir_exists = as_user(format("{hadoop_bin_dir}/hadoop --config {hadoop_conf_dir} fs -test -e {test_dir}"), params.smoke_user)
+    cleanup_cmd = format("fs -rm {tmp_file}")
+    #cleanup is prepended to handle retries; a retry would otherwise hit a stale
+    #file that needs cleanup; the exit code is that of the second command
+    create_file_cmd = format(
+      "{cleanup_cmd}; hadoop --config {hadoop_conf_dir} fs -put /etc/passwd {tmp_file}")
+    test_cmd = format("fs -test -e {tmp_file}")
+    if params.security_enabled:
+      Execute(format("{kinit_path_local} -kt {smoke_user_keytab} {smoke_user}"),
+        user=params.smoke_user
+      )
+    ExecuteHadoop(safemode_command,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=20,
+                  bin_dir=params.hadoop_bin_dir
+    )
+    ExecuteHadoop(create_dir_cmd,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  not_if=test_dir_exists,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5,
+                  bin_dir=params.hadoop_bin_dir
+    )
+    ExecuteHadoop(chmod_command,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5,
+                  bin_dir=params.hadoop_bin_dir
+    )
+    ExecuteHadoop(create_file_cmd,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5,
+                  bin_dir=params.hadoop_bin_dir
+    )
+    ExecuteHadoop(test_cmd,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5,
+                  bin_dir=params.hadoop_bin_dir
+    )
+    if params.has_journalnode_hosts:
+      journalnode_port = params.journalnode_port
+      checkWebUIFileName = "checkWebUI.py"
+      checkWebUIFilePath = format("{tmp_dir}/{checkWebUIFileName}")
+      comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
+      checkWebUICmd = format("python {checkWebUIFilePath} -m {comma_sep_jn_hosts} -p {journalnode_port}")
+      File(checkWebUIFilePath,
+           content=StaticFile(checkWebUIFileName),
+           mode=0775)
+
+      Execute(checkWebUICmd,
+              logoutput=True,
+              try_sleep=3,
+              tries=5,
+              user=params.smoke_user
+      )
+
+    if params.is_namenode_master:
+      if params.has_zkfc_hosts:
+        pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+        pid_file = format("{pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
+        check_zkfc_process_cmd = format(
+          "ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
+        Execute(check_zkfc_process_cmd,
+                logoutput=True,
+                try_sleep=3,
+                tries=5
+        )
+
+
+if __name__ == "__main__":
+  HdfsServiceCheck().execute()

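A note on the command chaining in create_file_cmd above: joining the cleanup and the put with ";" makes the exit code that of the second command, so a stale-file "fs -rm" failing on a first attempt cannot fail the whole check. A minimal plain-Python demonstration of that shell rule:

import subprocess

print(subprocess.call("false; true", shell=True))    # 0 - second command's status wins
print(subprocess.call("false && true", shell=True))  # 1 - "&&" would propagate the failure
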
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
new file mode 100644
index 0000000..7106422
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
@@ -0,0 +1,68 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hdfs_snamenode import snamenode
+from hdfs import hdfs
+
+
+class SNameNode(Script):
+
+  def install(self, env):
+    import params
+
+    env.set_params(params)
+
+    self.install_packages(env, params.exclude_packages)
+
+  def pre_rolling_restart(self, env):
+    # The secondary namenode does not exist in an HA cluster, and HA is a
+    # prerequisite for Rolling Upgrade, so no Rolling Restart logic is needed.
+    pass
+
+  def start(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+
+    self.configure(env)
+    snamenode(action="start")
+
+  def stop(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+
+    snamenode(action="stop")
+
+  def configure(self, env):
+    import params
+
+    env.set_params(params)
+    hdfs()
+    snamenode(action="configure")
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+
+    check_process_status(status_params.snamenode_pid_file)
+
+
+if __name__ == "__main__":
+  SNameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/status_params.py
new file mode 100644
index 0000000..0027a4c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/status_params.py
@@ -0,0 +1,31 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdp_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+datanode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
+namenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
+snamenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
+journalnode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
+zkfc_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")

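For reference, the pid-file layout computed above, with resource_management's format() replaced by plain string interpolation and the two hadoop-env values assumed to be their usual defaults:

hadoop_pid_dir_prefix = "/var/run/hadoop"  # assumed default
hdfs_user = "hdfs"                         # assumed default
pid_dir = "%s/%s" % (hadoop_pid_dir_prefix, hdfs_user)
for name in ("datanode", "namenode", "secondarynamenode", "journalnode", "zkfc"):
    print("%s/hadoop-%s-%s.pid" % (pid_dir, hdfs_user, name))
# e.g. /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid
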
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
new file mode 100644
index 0000000..6f421b6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
@@ -0,0 +1,230 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+import re
+
+from resource_management import *
+from resource_management.libraries.functions.format import format
+from resource_management.core.shell import call, checked_call
+from resource_management.core.exceptions import ComponentIsNotRunning
+
+from zkfc_slave import ZkfcSlave
+
+def safe_zkfc_op(action, env):
+  """
+  Idempotent operation on the zkfc process to either start or stop it.
+  :param action: start or stop
+  :param env: environment
+  """
+  zkfc = None
+  if action == "start":
+    try:
+      zkfc = ZkfcSlave()
+      zkfc.status(env)
+    except ComponentIsNotRunning:
+      if zkfc:
+        zkfc.start(env)
+
+  if action == "stop":
+    try:
+      zkfc = ZkfcSlave()
+      zkfc.status(env)
+    except ComponentIsNotRunning:
+      pass
+    else:
+      if zkfc:
+        zkfc.stop(env)
+
+
+def failover_namenode():
+  """
+  Fail over the active namenode by killing zkfc on this host, assuming this host is the active namenode.
+  """
+  import params
+  check_service_cmd = format("hdfs haadmin -getServiceState {namenode_id}")
+  code, out = call(check_service_cmd, verbose=True, logoutput=True, user=params.hdfs_user)
+
+  state = "unknown"
+  if code == 0 and out:
+    state = "active" if "active" in out else ("standby" if "standby" in out else state)
+    Logger.info("Namenode service state: %s" % state)
+
+  if state == "active":
+    Logger.info("Rolling Upgrade - Initiating namenode failover by killing zkfc on active namenode")
+
+    # Forcefully kill ZKFC on this host to initiate a failover
+    kill_zkfc(params.hdfs_user)
+
+    # Wait until it transitions to standby
+    check_standby_cmd = format("hdfs haadmin -getServiceState {namenode_id} | grep standby")
+    Execute(check_standby_cmd,
+            user=params.hdfs_user,
+            tries=30,
+            try_sleep=6,
+            logoutput=True)
+  else:
+    Logger.info("Rolling Upgrade - Host %s is the standby namenode." % str(params.hostname))
+
+
+def kill_zkfc(zkfc_user):
+  """
+  There are two potential methods for failing over the namenode, especially during a Rolling Upgrade.
+  Option 1. Kill zkfc on the active namenode, provided the standby is up and has zkfc running on it.
+  Option 2. Silent failover (not supported as of HDP 2.2.0.0)
+  :param zkfc_user: User that started the ZKFC process.
+  """
+  import params
+  if params.dfs_ha_enabled:
+    zkfc_pid_file = get_service_pid_file("zkfc", zkfc_user)
+    if zkfc_pid_file:
+      check_process = format("ls {zkfc_pid_file} > /dev/null 2>&1 && ps -p `cat {zkfc_pid_file}` > /dev/null 2>&1")
+      code, out = call(check_process, verbose=True)
+      if code == 0:
+        Logger.debug("ZKFC is running and will be killed to initiate namenode failover.")
+        kill_command = format("{check_process} && kill -9 `cat {zkfc_pid_file}` > /dev/null 2>&1")
+        checked_call(kill_command, verbose=True)
+
+
+def get_service_pid_file(name, user):
+  """
+  Get the pid file path that was used to start the service by the user.
+  :param name: Service name
+  :param user: User that started the service.
+  :return: PID file path
+  """
+  import params
+  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
+  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
+  return pid_file
+
+
+def service(action=None, name=None, user=None, options="", create_pid_dir=False,
+            create_log_dir=False):
+  """
+  :param action: Either "start" or "stop"
+  :param name: Component name, e.g., "namenode", "datanode", "secondarynamenode", "zkfc"
+  :param user: User to run the command as
+  :param options: Additional options to pass to command as a string
+  :param create_pid_dir: Create PID directory
+  :param create_log_dir: Create log file directory
+  """
+  import params
+
+  options = options if options else ""
+  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
+  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
+  log_dir = format("{hdfs_log_dir_prefix}/{user}")
+  check_process = format(
+    "ls {pid_file} >/dev/null 2>&1 &&"
+    " ps -p `cat {pid_file}` >/dev/null 2>&1")
+
+  if create_pid_dir:
+    Directory(pid_dir,
+              owner=user,
+              recursive=True)
+  if create_log_dir:
+    Directory(log_dir,
+              owner=user,
+              recursive=True)
+
+  hadoop_env_exports = {
+    'HADOOP_LIBEXEC_DIR': params.hadoop_libexec_dir
+  }
+
+  if params.security_enabled and name == "datanode":
+    ## The directory where pid files are stored in the secure data environment.
+    hadoop_secure_dn_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+    hadoop_secure_dn_pid_file = format("{hadoop_secure_dn_pid_dir}/hadoop_secure_dn.pid")
+
+    # On HDP 2.2 (Champlain) and later, the datanode may be started as a non-root user even in a secure cluster
+    if not (params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0) or params.secure_dn_ports_are_in_use:
+      user = "root"
+      pid_file = format(
+        "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
+
+    if action == 'stop' and (params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0) and \
+      os.path.isfile(hadoop_secure_dn_pid_file):
+        # Special case: a secure datanode previously started as root is being
+        # restarted to pick up new configs that make it run as a non-root user.
+        # It must still be stopped as root, otherwise the running instance
+        # cannot be stopped at all.
+        user = "root"
+        
+        try:
+          check_process_status(hadoop_secure_dn_pid_file)
+          
+          custom_export = {
+            'HADOOP_SECURE_DN_USER': params.hdfs_user
+          }
+          hadoop_env_exports.update(custom_export)
+          
+        except ComponentIsNotRunning:
+          pass
+
+  hadoop_daemon = format("{hadoop_bin}/hadoop-daemon.sh")
+
+  if user == "root":
+    cmd = [hadoop_daemon, "--config", params.hadoop_conf_dir, action, name]
+    if options:
+      cmd += [options, ]
+    daemon_cmd = as_sudo(cmd)
+  else:
+    cmd = format("{ulimit_cmd} {hadoop_daemon} --config {hadoop_conf_dir} {action} {name}")
+    if options:
+      cmd += " " + options
+    daemon_cmd = as_user(cmd, user)
+     
+  service_is_up = check_process if action == "start" else None
+  #remove pid file from dead process
+  File(pid_file,
+       action="delete",
+       not_if=check_process
+  )
+  Execute(daemon_cmd,
+          not_if=service_is_up,
+          environment=hadoop_env_exports
+  )
+
+  if action == "stop":
+    File(pid_file,
+         action="delete",
+    )
+
+
+def get_port(address):
+  """
+  Extracts port from the address like 0.0.0.0:1019
+  """
+  if address is None:
+    return None
+  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
+  if m is not None:
+    return int(m.group(2))
+  else:
+    return None
+
+
+def is_secure_port(port):
+  """
+  Returns True if the port is root-owned on *nix systems
+  """
+  if port is not None:
+    return port < 1024
+  else:
+    return False

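The "is the daemon alive" idiom that kill_zkfc() and service() both build ("ls <pid_file> && ps -p `cat <pid_file>`") can be exercised standalone; a minimal sketch, with a hypothetical pid-file path:

import subprocess

pid_file = "/var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid"  # hypothetical path
check = "ls {0} >/dev/null 2>&1 && ps -p `cat {0}` >/dev/null 2>&1".format(pid_file)
print("running" if subprocess.call(check, shell=True) == 0 else "not running")
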
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
new file mode 100644
index 0000000..ee8b418
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py
@@ -0,0 +1,72 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions.check_process_status import check_process_status
+
+import utils  # this is needed to avoid a circular dependency since utils.py calls this class
+from hdfs import hdfs
+
+
+class ZkfcSlave(Script):
+  def install(self, env):
+    import params
+
+    env.set_params(params)
+    self.install_packages(env, params.exclude_packages)
+
+  def start(self, env, rolling_restart=False):
+    import params
+
+    env.set_params(params)
+    self.configure(env)
+    Directory(params.hadoop_pid_dir_prefix,
+              mode=0755,
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+    utils.service(
+      action="start", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
+      create_log_dir=True
+    )
+
+  def stop(self, env, rolling_restart=False):
+    import params
+
+    env.set_params(params)
+    utils.service(
+      action="stop", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
+      create_log_dir=True
+    )
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hdfs()
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+
+    check_process_status(status_params.zkfc_pid_file)
+
+
+if __name__ == "__main__":
+  ZkfcSlave().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..a92cdc1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %}
\ No newline at end of file

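A minimal render of the template above using stock jinja2 (host names invented; at runtime Ambari renders it with hdfs_exclude_file taken from /clusterHostInfo/decom_dn_hosts, per params.py):

from jinja2 import Template

tpl = Template("{% for host in hdfs_exclude_file %}\n{{host}}\n{% endfor %}")
print(tpl.render(hdfs_exclude_file=["dn1.example.com", "dn2.example.com"]))
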
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/hdfs.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/hdfs.conf.j2 b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/hdfs.conf.j2
new file mode 100644
index 0000000..d58a6f5
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/hdfs.conf.j2
@@ -0,0 +1,35 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{hdfs_user}}   - nofile 32768
+{{hdfs_user}}   - nproc  65536

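The two limits.d entries above raise the open-files and process caps for the hdfs account. A process can read the values that actually apply to it via the stdlib resource module (Linux; RLIMIT_NPROC is not available on every platform):

import resource

soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
print("nofile soft=%d hard=%d" % (soft, hard))
soft, hard = resource.getrlimit(resource.RLIMIT_NPROC)
print("nproc  soft=%d hard=%d" % (soft, hard))
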
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/slaves.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/slaves.j2 b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/slaves.j2
new file mode 100644
index 0000000..4a9e713
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/templates/slaves.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in slave_hosts %}
+{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/alerts.json b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/alerts.json
new file mode 100644
index 0000000..5344414
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/alerts.json
@@ -0,0 +1,58 @@
+{
+  "ZOOKEEPER": {
+    "service": [
+      {
+        "name": "zookeeper_server_process_percent",
+        "label": "Percent ZooKeeper Servers Available",
+        "description": "This alert is triggered if the number of down ZooKeeper servers in the cluster is greater than the configured critical threshold. It aggregates the results of ZooKeeper process checks.",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "AGGREGATE",
+          "alert_name": "zookeeper_server_process",
+          "reporting": {
+            "ok": {
+              "text": "affected: [{1}], total: [{0}]"
+            },
+            "warning": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.35
+            },
+            "critical": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.70
+            }
+          }
+        }
+      }  
+    ],
+    "ZOOKEEPER_SERVER": [
+      {
+        "name": "zookeeper_server_process",
+        "label": "ZooKeeper Server Process",
+        "description": "This host-level alert is triggered if the ZooKeeper server process cannot be determined to be up and listening on the network.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "PORT",
+          "uri": "{{zookeeper-env/clientPort}}",
+          "default_port": 2181,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file

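For reference, how the AGGREGATE thresholds above play out as fractions of affected ZooKeeper hosts; the exact comparison operator used by Ambari's aggregator is an assumption here (at-or-above shown):

def zk_aggregate_state(affected, total, warn=0.35, crit=0.70):
    # assumed semantics: a ratio at or above a threshold trips that level
    ratio = float(affected) / total if total else 0.0
    if ratio >= crit:
        return "CRITICAL"
    if ratio >= warn:
        return "WARNING"
    return "OK"

print(zk_aggregate_state(1, 3))  # OK       (0.33, below the 0.35 warning mark)
print(zk_aggregate_state(2, 3))  # WARNING  (0.67)
print(zk_aggregate_state(3, 3))  # CRITICAL (1.00)
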

[19/37] ambari git commit: AMBARI-8745: Common Services: Refactor HDP2.0.6 FLUME, GANGLIA, HBASE services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/metrics.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/metrics.json
deleted file mode 100644
index 2114c12..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/metrics.json
+++ /dev/null
@@ -1,716 +0,0 @@
-{
-  "FLUME_HANDLER": {
-    "Component": [
-        {
-        "type": "ganglia",
-        "metrics": {
-          "metrics/boottime":{
-            "metric":"boottime",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/cpu/cpu_aidle":{
-            "metric":"cpu_aidle",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/cpu/cpu_idle":{
-            "metric":"cpu_idle",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/cpu/cpu_nice":{
-            "metric":"cpu_nice",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/cpu/cpu_num":{
-            "metric":"cpu_num",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/cpu/cpu_speed":{
-            "metric":"cpu_speed",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/cpu/cpu_system":{
-            "metric":"cpu_system",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/cpu/cpu_user":{
-            "metric":"cpu_user",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/cpu/cpu_wio":{
-            "metric":"cpu_wio",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/disk/disk_free":{
-            "metric":"disk_free",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/disk/disk_total":{
-            "metric":"disk_total",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/disk/part_max_used":{
-            "metric":"part_max_used",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/gcCount":{
-            "metric":"jvm.metrics.gcCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/gcTimeMillis":{
-            "metric":"jvm.metrics.gcTimeMillis",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/logError":{
-            "metric":"jvm.metrics.logError",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/logFatal":{
-            "metric":"jvm.metrics.logFatal",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/logInfo":{
-            "metric":"jvm.metrics.logInfo",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/logWarn":{
-            "metric":"jvm.metrics.logWarn",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/maxMemoryM":{
-            "metric":"jvm.metrics.maxMemoryM",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/memHeapCommittedM":{
-            "metric":"jvm.metrics.memHeapCommittedM",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/memHeapUsedM":{
-            "metric":"jvm.metrics.memHeapUsedM",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/memNonHeapCommittedM":{
-            "metric":"jvm.metrics.memNonHeapCommittedM",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/memNonHeapUsedM":{
-            "metric":"jvm.metrics.memNonHeapUsedM",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/threadsBlocked":{
-            "metric":"jvm.metrics.threadsBlocked",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/threadsNew":{
-            "metric":"jvm.metrics.threadsNew",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/threadsRunnable":{
-            "metric":"jvm.metrics.threadsRunnable",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/threadsTerminated":{
-            "metric":"jvm.metrics.threadsTerminated",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/threadsTimedWaiting":{
-            "metric":"jvm.metrics.threadsTimedWaiting",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/threadsWaiting":{
-            "metric":"jvm.metrics.threadsWaiting",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/CHANNEL/$2/ChannelCapacity":{
-            "metric":"(\\w+).CHANNEL.(\\w+).ChannelCapacity",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/CHANNEL/$2/StartTime":{
-            "metric":"(\\w+).CHANNEL.(\\w+).StartTime",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/CHANNEL/$2/EventTakeAttemptCount":{
-            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeAttemptCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/CHANNEL/$2/EventTakeSuccessCount":{
-            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/CHANNEL/$2/EventPutAttemptCount":{
-            "metric":"(\\w+).CHANNEL.(\\w+).EventPutAttemptCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/CHANNEL/$2/StopTime":{
-            "metric":"(\\w+).CHANNEL.(\\w+).StopTime",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/CHANNEL/$2/ChannelFillPercentage":{
-            "metric":"(\\w+).CHANNEL.(\\w+).ChannelFillPercentage",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/CHANNEL/$2/ChannelSize":{
-            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/CHANNEL/$2/EventPutSuccessCount":{
-            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SINK/$2/ConnectionCreatedCount":{
-            "metric":"(\\w+).SINK.(\\w+).ConnectionCreatedCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SINK/$2/BatchCompleteCount":{
-            "metric":"(\\w+).SINK.(\\w+).BatchCompleteCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SINK/$2/EventDrainSuccessCount":{
-            "metric":"(\\w+).SINK.(\\w+).EventDrainSuccessCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SINK/$2/StartTime":{
-            "metric":"(\\w+).SINK.(\\w+).StartTime",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SINK/$2/EventDrainAttemptCount":{
-            "metric":"(\\w+).SINK.(\\w+).EventDrainAttemptCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SINK/$2/ConnectionFailedCount":{
-            "metric":"(\\w+).SINK.(\\w+).ConnectionFailedCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SINK/$2/BatchUnderflowCount":{
-            "metric":"(\\w+).SINK.(\\w+).BatchUnderflowCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SINK/$2/ConnectionClosedCount":{
-            "metric":"(\\w+).SINK.(\\w+).ConnectionClosedCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SINK/$2/StopTime":{
-            "metric":"(\\w+).SINK.(\\w+).StopTime",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SINK/$2/BatchEmptyCount":{
-            "metric":"(\\w+).SINK.(\\w+).BatchEmptyCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SOURCE/$2/AppendBatchReceivedCount":{
-            "metric":"(\\w+).SOURCE.(\\w+).AppendBatchReceivedCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SOURCE/$2/AppendAcceptedCount":{
-            "metric":"(\\w+).SOURCE.(\\w+).AppendAcceptedCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SOURCE/$2/StartTime":{
-            "metric":"(\\w+).SOURCE.(\\w+).StartTime",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SOURCE/$2/OpenConnectionCount":{
-            "metric":"(\\w+).SOURCE.(\\w+).OpenConnectionCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SOURCE/$2/AppendBatchAcceptedCount":{
-            "metric":"(\\w+).SOURCE.(\\w+).AppendBatchAcceptedCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SOURCE/$2/AppendReceivedCount":{
-            "metric":"(\\w+).SOURCE.(\\w+).AppendReceivedCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SOURCE/$2/EventReceivedCount":{
-            "metric":"(\\w+).SOURCE.(\\w+).EventReceivedCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SOURCE/$2/StopTime":{
-            "metric":"(\\w+).SOURCE.(\\w+).StopTime",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SOURCE/$2/EventAcceptedCount":{
-            "metric":"(\\w+).SOURCE.(\\w+).EventAcceptedCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-
-          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/min": {
-            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._min",
-            "pointInTime":false,
-            "temporal":true
-          },
-          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/max": {
-            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._max",
-            "pointInTime":false,
-            "temporal":true
-          },
-          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/avg": {
-            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._avg",
-            "pointInTime":false,
-            "temporal":true
-          },
-          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/sum": {
-            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._sum",
-            "pointInTime":false,
-            "temporal":true
-          },
-
-          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/avg": {
-            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._avg",
-            "pointInTime":false,
-            "temporal":true
-          },
-          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/max": {
-            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._max",
-            "pointInTime":false,
-            "temporal":true
-          },
-          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/min": {
-            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._min",
-            "pointInTime":false,
-            "temporal":true
-          },
-          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/sum": {
-            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._sum",
-            "pointInTime":false,
-            "temporal":true
-          },
-          
-          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/avg": {
-            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._avg",
-            "pointInTime":false,
-            "temporal":true
-          },
-          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/max": {
-            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._max",
-            "pointInTime":false,
-            "temporal":true
-          },
-          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/min": {
-            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._min",
-            "pointInTime":false,
-            "temporal":true
-          },
-          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/sum": {
-            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._sum",
-            "pointInTime":false,
-            "temporal":true
-          }
-
-        }
-      }
-    ],
-    "HostComponent": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "metrics/boottime":{
-            "metric":"boottime",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/cpu/cpu_aidle":{
-            "metric":"cpu_aidle",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/cpu/cpu_idle":{
-            "metric":"cpu_idle",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/cpu/cpu_nice":{
-            "metric":"cpu_nice",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/cpu/cpu_num":{
-            "metric":"cpu_num",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/cpu/cpu_speed":{
-            "metric":"cpu_speed",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/cpu/cpu_system":{
-            "metric":"cpu_system",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/cpu/cpu_user":{
-            "metric":"cpu_user",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/cpu/cpu_wio":{
-            "metric":"cpu_wio",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/disk/disk_free":{
-            "metric":"disk_free",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/disk/disk_total":{
-            "metric":"disk_total",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/disk/part_max_used":{
-            "metric":"part_max_used",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/gcCount":{
-            "metric":"jvm.metrics.gcCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/gcTimeMillis":{
-            "metric":"jvm.metrics.gcTimeMillis",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/logError":{
-            "metric":"jvm.metrics.logError",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/logFatal":{
-            "metric":"jvm.metrics.logFatal",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/logInfo":{
-            "metric":"jvm.metrics.logInfo",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/logWarn":{
-            "metric":"jvm.metrics.logWarn",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/maxMemoryM":{
-            "metric":"jvm.metrics.maxMemoryM",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/memHeapCommittedM":{
-            "metric":"jvm.metrics.memHeapCommittedM",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/memHeapUsedM":{
-            "metric":"jvm.metrics.memHeapUsedM",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/memNonHeapCommittedM":{
-            "metric":"jvm.metrics.memNonHeapCommittedM",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/memNonHeapUsedM":{
-            "metric":"jvm.metrics.memNonHeapUsedM",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/threadsBlocked":{
-            "metric":"jvm.metrics.threadsBlocked",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/threadsNew":{
-            "metric":"jvm.metrics.threadsNew",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/threadsRunnable":{
-            "metric":"jvm.metrics.threadsRunnable",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/threadsTerminated":{
-            "metric":"jvm.metrics.threadsTerminated",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/threadsTimedWaiting":{
-            "metric":"jvm.metrics.threadsTimedWaiting",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/jvm/threadsWaiting":{
-            "metric":"jvm.metrics.threadsWaiting",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/CHANNEL/$2/ChannelCapacity":{
-            "metric":"(\\w+).CHANNEL.(\\w+).ChannelCapacity",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/CHANNEL/$2/StartTime":{
-            "metric":"(\\w+).CHANNEL.(\\w+).StartTime",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/CHANNEL/$2/EventTakeAttemptCount":{
-            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeAttemptCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/CHANNEL/$2/EventTakeSuccessCount":{
-            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/CHANNEL/$2/EventPutAttemptCount":{
-            "metric":"(\\w+).CHANNEL.(\\w+).EventPutAttemptCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/CHANNEL/$2/StopTime":{
-            "metric":"(\\w+).CHANNEL.(\\w+).StopTime",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/CHANNEL/$2/ChannelFillPercentage":{
-            "metric":"(\\w+).CHANNEL.(\\w+).ChannelFillPercentage",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/CHANNEL/$2/ChannelSize":{
-            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/CHANNEL/$2/EventPutSuccessCount":{
-            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SINK/$2/ConnectionCreatedCount":{
-            "metric":"(\\w+).SINK.(\\w+).ConnectionCreatedCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SINK/$2/BatchCompleteCount":{
-            "metric":"(\\w+).SINK.(\\w+).BatchCompleteCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SINK/$2/EventDrainSuccessCount":{
-            "metric":"(\\w+).SINK.(\\w+).EventDrainSuccessCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SINK/$2/StartTime":{
-            "metric":"(\\w+).SINK.(\\w+).StartTime",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SINK/$2/EventDrainAttemptCount":{
-            "metric":"(\\w+).SINK.(\\w+).EventDrainAttemptCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SINK/$2/ConnectionFailedCount":{
-            "metric":"(\\w+).SINK.(\\w+).ConnectionFailedCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SINK/$2/BatchUnderflowCount":{
-            "metric":"(\\w+).SINK.(\\w+).BatchUnderflowCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SINK/$2/ConnectionClosedCount":{
-            "metric":"(\\w+).SINK.(\\w+).ConnectionClosedCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SINK/$2/StopTime":{
-            "metric":"(\\w+).SINK.(\\w+).StopTime",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SINK/$2/BatchEmptyCount":{
-            "metric":"(\\w+).SINK.(\\w+).BatchEmptyCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SOURCE/$2/AppendBatchReceivedCount":{
-            "metric":"(\\w+).SOURCE.(\\w+).AppendBatchReceivedCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SOURCE/$2/AppendAcceptedCount":{
-            "metric":"(\\w+).SOURCE.(\\w+).AppendAcceptedCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SOURCE/$2/StartTime":{
-            "metric":"(\\w+).SOURCE.(\\w+).StartTime",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SOURCE/$2/OpenConnectionCount":{
-            "metric":"(\\w+).SOURCE.(\\w+).OpenConnectionCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SOURCE/$2/AppendBatchAcceptedCount":{
-            "metric":"(\\w+).SOURCE.(\\w+).AppendBatchAcceptedCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SOURCE/$2/AppendReceivedCount":{
-            "metric":"(\\w+).SOURCE.(\\w+).AppendReceivedCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SOURCE/$2/EventReceivedCount":{
-            "metric":"(\\w+).SOURCE.(\\w+).EventReceivedCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SOURCE/$2/StopTime":{
-            "metric":"(\\w+).SOURCE.(\\w+).StopTime",
-            "pointInTime":true,
-            "temporal":true
-          },
-          "metrics/flume/$1/SOURCE/$2/EventAcceptedCount":{
-            "metric":"(\\w+).SOURCE.(\\w+).EventAcceptedCount",
-            "pointInTime":true,
-            "temporal":true
-          },
-
-          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/avg": {
-            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._avg",
-            "pointInTime":false,
-            "temporal":true
-          },
-          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/max": {
-            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._max",
-            "pointInTime":false,
-            "temporal":true
-          },
-          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/min": {
-            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._min",
-            "pointInTime":false,
-            "temporal":true
-          },
-          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/sum": {
-            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._sum",
-            "pointInTime":false,
-            "temporal":true
-          },
-
-          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/avg": {
-            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._avg",
-            "pointInTime":false,
-            "temporal":true
-          },
-          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/max": {
-            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._max",
-            "pointInTime":false,
-            "temporal":true
-          },
-          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/min": {
-            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._min",
-            "pointInTime":false,
-            "temporal":true
-          },
-          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/sum": {
-            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._sum",
-            "pointInTime":false,
-            "temporal":true
-          },
-          
-          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/avg": {
-            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._avg",
-            "pointInTime":false,
-            "temporal":true
-          },
-          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/max": {
-            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._max",
-            "pointInTime":false,
-            "temporal":true
-          },
-          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/min": {
-            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._min",
-            "pointInTime":false,
-            "temporal":true
-          },
-          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/sum": {
-            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._sum",
-            "pointInTime":false,
-            "temporal":true
-          }
-          
-        }
-      }
-    ]
-  }
-}

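The metrics.json entries removed above map Ambari property paths to Ganglia metric names; the "$1"/"$2" entries are regex-backed, with the capture groups substituted back into the property path. A minimal sketch of that substitution, using a hypothetical resolver (this is not Ambari's actual property provider):

    import re

    pattern = r"(\w+).CHANNEL.(\w+).EventTakeSuccessCount"
    property_path = "metrics/flume/$1/CHANNEL/$2/EventTakeSuccessCount"

    m = re.match(pattern + "$", "a1.CHANNEL.c1.EventTakeSuccessCount")
    if m:
        # => metrics/flume/a1/CHANNEL/c1/EventTakeSuccessCount
        resolved = property_path.replace("$1", m.group(1)).replace("$2", m.group(2))
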
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/files/alert_flume_agent_status.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/files/alert_flume_agent_status.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/files/alert_flume_agent_status.py
deleted file mode 100644
index b183bbc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/files/alert_flume_agent_status.py
+++ /dev/null
@@ -1,99 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import socket
-
-from resource_management.libraries.functions.flume_agent_helper import find_expected_agent_names
-from resource_management.libraries.functions.flume_agent_helper import get_flume_status
-
-RESULT_CODE_OK = 'OK'
-RESULT_CODE_CRITICAL = 'CRITICAL'
-RESULT_CODE_UNKNOWN = 'UNKNOWN'
-
-FLUME_CONF_DIR_KEY = '{{flume-env/flume_conf_dir}}'
-
-FLUME_RUN_DIR = '/var/run/flume'
-
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (FLUME_CONF_DIR_KEY,)
-  
-
-def execute(parameters=None, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  parameters (dictionary): a mapping of parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-
-  if parameters is None:
-    return (RESULT_CODE_UNKNOWN, ['There were no parameters supplied to the script.'])
-
-  flume_conf_directory = None
-  if FLUME_CONF_DIR_KEY in parameters:
-    flume_conf_directory = parameters[FLUME_CONF_DIR_KEY]
-
-  if flume_conf_directory is None:
-    return (RESULT_CODE_UNKNOWN, ['The Flume configuration directory is a required parameter.'])
-
-  if host_name is None:
-    host_name = socket.getfqdn()
-
-  processes = get_flume_status(flume_conf_directory, FLUME_RUN_DIR)
-  expected_agents = find_expected_agent_names(flume_conf_directory)
-
-  alert_label = ''
-  alert_state = RESULT_CODE_OK
-
-  if len(processes) == 0 and len(expected_agents) == 0:
-    alert_label = 'No agents defined on {0}'.format(host_name)
-  else:
-    ok = []
-    critical = []
-    text_arr = []
-
-    for process in processes:
-      if not process.has_key('status') or process['status'] == 'NOT_RUNNING':
-        critical.append(process['name'])
-      else:
-        ok.append(process['name'])
-
-    if len(critical) > 0:
-      text_arr.append("{0} {1} NOT running".format(", ".join(critical),
-        "is" if len(critical) == 1 else "are"))
-
-    if len(ok) > 0:
-      text_arr.append("{0} {1} running".format(", ".join(ok),
-        "is" if len(ok) == 1 else "are"))
-
-    plural = len(critical) > 1 or len(ok) > 1
-    alert_label = "Agent{0} {1} {2}".format(
-      "s" if plural else "",
-      " and ".join(text_arr),
-      "on " + host_name)
-
-    alert_state = RESULT_CODE_CRITICAL if len(critical) > 0 else RESULT_CODE_OK
-
-  return (alert_state, [alert_label])
\ No newline at end of file

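For reference, a hypothetical invocation of the alert script removed above; Ambari's alert framework normally builds the parameters dictionary from the tokens returned by get_tokens():

    parameters = {'{{flume-env/flume_conf_dir}}': '/etc/flume/conf'}
    state, labels = execute(parameters=parameters, host_name='c6401.ambari.apache.org')
    # state is 'OK', 'CRITICAL', or 'UNKNOWN'; labels[0] is the formatted text,
    # e.g. "Agent a1 is running on c6401.ambari.apache.org"
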
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume.py
deleted file mode 100644
index f4870ad..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume.py
+++ /dev/null
@@ -1,206 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import glob
-import json
-import os
-from resource_management import *
-from resource_management.libraries.functions.flume_agent_helper import is_flume_process_live
-from resource_management.libraries.functions.flume_agent_helper import find_expected_agent_names
-
-def flume(action = None):
-  import params
-
-  if action == 'config':
-    # remove previously defined meta files
-    for n in find_expected_agent_names(params.flume_conf_dir):
-      os.unlink(os.path.join(params.flume_conf_dir, n, 'ambari-meta.json'))
-
-    Directory(params.flume_conf_dir, recursive=True)
-    Directory(params.flume_log_dir, owner=params.flume_user)
-
-    flume_agents = {}
-    if params.flume_conf_content is not None:
-      flume_agents = build_flume_topology(params.flume_conf_content)
-
-    for agent in flume_agents.keys():
-      flume_agent_conf_dir = os.path.join(params.flume_conf_dir, agent)
-      flume_agent_conf_file = os.path.join(flume_agent_conf_dir, 'flume.conf')
-      flume_agent_meta_file = os.path.join(flume_agent_conf_dir, 'ambari-meta.json')
-      flume_agent_log4j_file = os.path.join(flume_agent_conf_dir, 'log4j.properties')
-      flume_agent_env_file = os.path.join(flume_agent_conf_dir, 'flume-env.sh')
-
-      Directory(flume_agent_conf_dir)
-
-      PropertiesFile(flume_agent_conf_file,
-        properties=flume_agents[agent],
-        mode = 0644)
-
-      File(flume_agent_log4j_file,
-        content=Template('log4j.properties.j2', agent_name = agent),
-        mode = 0644)
-
-      File(flume_agent_meta_file,
-        content = json.dumps(ambari_meta(agent, flume_agents[agent])),
-        mode = 0644)
-
-      File(flume_agent_env_file,
-           owner=params.flume_user,
-           content=InlineTemplate(params.flume_env_sh_template)
-      )
-
-      if params.has_metric_collector:
-        File(os.path.join(flume_agent_conf_dir, "flume-metrics2.properties"),
-             owner=params.flume_user,
-             content=Template("flume-metrics2.properties.j2")
-        )
-
-  elif action == 'start':
-    # desired state for service should be STARTED
-    if len(params.flume_command_targets) == 0:
-      _set_desired_state('STARTED')
-
-    # It is important to run this command as a background process.
-    
-    
-    flume_base = as_user(format("{flume_bin} agent --name {{0}} --conf {{1}} --conf-file {{2}} {{3}}"), params.flume_user, env={'JAVA_HOME': params.java_home}) + " &"
-
-    for agent in cmd_target_names():
-      flume_agent_conf_dir = params.flume_conf_dir + os.sep + agent
-      flume_agent_conf_file = flume_agent_conf_dir + os.sep + "flume.conf"
-      flume_agent_pid_file = params.flume_run_dir + os.sep + agent + ".pid"
-
-      if not os.path.isfile(flume_agent_conf_file):
-        continue
-
-      if not is_flume_process_live(flume_agent_pid_file):
-        # TODO someday make the ganglia ports configurable
-        extra_args = ''
-        if params.ganglia_server_host is not None:
-          extra_args = '-Dflume.monitoring.type=ganglia -Dflume.monitoring.hosts={0}:{1}'
-          extra_args = extra_args.format(params.ganglia_server_host, '8655')
-        if params.has_metric_collector:
-          extra_args = '-Dflume.monitoring.type=org.apache.hadoop.metrics2.sink.flume.FlumeTimelineMetricsSink'
-
-        flume_cmd = flume_base.format(agent, flume_agent_conf_dir,
-           flume_agent_conf_file, extra_args)
-
-        Execute(flume_cmd, 
-          wait_for_finish=False,
-          environment={'JAVA_HOME': params.java_home}
-        )
-
-        # sometimes startup spawns a couple of threads - so only the first line may count
-        pid_cmd = format('pgrep -o -u {flume_user} -f ^{java_home}.*{agent}.* > {flume_agent_pid_file}')
-        Execute(pid_cmd, logoutput=True, tries=20, try_sleep=6)
-
-    pass
-  elif action == 'stop':
-    # desired state for service should be INSTALLED
-    if len(params.flume_command_targets) == 0:
-      _set_desired_state('INSTALLED')
-
-    pid_files = glob.glob(params.flume_run_dir + os.sep + "*.pid")
-
-    if 0 == len(pid_files):
-      return
-
-    agent_names = cmd_target_names()
-
-
-    for agent in agent_names:
-      pid_file = params.flume_run_dir + os.sep + agent + '.pid'
-      pid = format('`cat {pid_file}` > /dev/null 2>&1')
-      Execute(format('kill {pid}'), ignore_failures=True)
-      File(pid_file, action = 'delete')
-
-
-def ambari_meta(agent_name, agent_conf):
-  res = {}
-
-  sources = agent_conf[agent_name + '.sources'].split(' ')
-  res['sources_count'] = len(sources)
-
-  sinks = agent_conf[agent_name + '.sinks'].split(' ')
-  res['sinks_count'] = len(sinks)
-
-  channels = agent_conf[agent_name + '.channels'].split(' ')
-  res['channels_count'] = len(channels)
-
-  return res
-
-
-# define a map of dictionaries, where the key is agent name
-# and the dictionary is the name/value pair
-def build_flume_topology(content):
-
-  result = {}
-  agent_names = []
-
-  for line in content.split('\n'):
-    rline = line.strip()
-    if 0 != len(rline) and not rline.startswith('#'):
-      pair = rline.split('=')
-      lhs = pair[0].strip()
-      rhs = pair[1].strip()
-
-      part0 = lhs.split('.')[0]
-
-      if lhs.endswith(".sources"):
-        agent_names.append(part0)
-
-      if not result.has_key(part0):
-        result[part0] = {}
-
-      result[part0][lhs] = rhs
-
-  # trim out non-agents
-  for k in result.keys():
-    if not k in agent_names:
-      del result[k]
-
-  return result
-
-
-def cmd_target_names():
-  import params
-
-  if len(params.flume_command_targets) > 0:
-    return params.flume_command_targets
-  else:
-    return find_expected_agent_names(params.flume_conf_dir)
-
-
-def _set_desired_state(state):
-  import params
-  filename = os.path.join(params.flume_run_dir, 'ambari-state.txt')
-  File(filename,
-    content = state,
-  )
-
-
-def get_desired_state():
-  import params
-
-  try:
-    with open(os.path.join(params.flume_run_dir, 'ambari-state.txt'), 'r') as fp:
-      return fp.read()
-  except:
-    return 'INSTALLED'
-  

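A small worked example of the build_flume_topology() parser removed above: only keys whose first dotted component also declares a ".sources" entry survive the final trim (agent names and values here are hypothetical):

    content = (
        "a1.sources = s1\n"
        "a1.channels = c1\n"
        "a1.sources.s1.type = netcat\n"
        "b1.channels = c2\n"  # b1 never defines b1.sources, so it is trimmed
    )
    build_flume_topology(content)
    # => {'a1': {'a1.sources': 's1',
    #            'a1.channels': 'c1',
    #            'a1.sources.s1.type': 'netcat'}}
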
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume_check.py
deleted file mode 100644
index b93b8e8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume_check.py
+++ /dev/null
@@ -1,40 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-class FlumeServiceCheck(Script):
-
-  def service_check(self, env):
-    import params
-
-    env.set_params(params)
-    if params.security_enabled:
-      principal_replaced = params.http_principal.replace("_HOST", params.hostname)
-      Execute(format("{kinit_path_local} -kt {http_keytab} {principal_replaced}"),
-              user=params.smoke_user)
-
-    Execute(format('env JAVA_HOME={java_home} {flume_bin} version'),
-            logoutput=True,
-            tries = 3,
-            try_sleep = 20)
-
-if __name__ == "__main__":
-  FlumeServiceCheck().execute()

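With a hypothetical java_home of /usr/jdk64/jdk1.7.0_45 and the pre-HDP-2.2 flume_bin from params.py, the Execute above resolves to the shell command:

    env JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/bin/flume-ng version
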
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume_handler.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume_handler.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume_handler.py
deleted file mode 100644
index a13f507..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume_handler.py
+++ /dev/null
@@ -1,79 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from flume import flume
-from flume import get_desired_state
-
-from resource_management import *
-from resource_management.libraries.functions.flume_agent_helper import find_expected_agent_names
-from resource_management.libraries.functions.flume_agent_helper import get_flume_status
-
-class FlumeHandler(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-
-  def start(self, env, rolling_restart=False):
-    import params
-
-    env.set_params(params)
-    self.configure(env)
-
-    flume(action='start')
-
-  def stop(self, env, rolling_restart=False):
-    import params
-
-    env.set_params(params)
-
-    flume(action='stop')
-
-  def configure(self, env):
-    import params
-
-    env.set_params(params)
-
-    flume(action='config')
-
-  def status(self, env):
-    import params
-    env.set_params(params)
-
-
-    processes = get_flume_status(params.flume_conf_dir, params.flume_run_dir)
-    expected_agents = find_expected_agent_names(params.flume_conf_dir)
-
-    json = {}
-    json['processes'] = processes
-    self.put_structured_out(json)
-
-    # only throw an exception if there are agents defined and there is a 
-    # problem with the processes; if there are no agents defined, then 
-    # the service should report STARTED (green) only if the desired state is STARTED; otherwise, INSTALLED (red)
-    if len(expected_agents) > 0:
-      for proc in processes:
-        if not proc.has_key('status') or proc['status'] == 'NOT_RUNNING':
-          raise ComponentIsNotRunning()
-    elif len(expected_agents) == 0 and 'INSTALLED' == get_desired_state():
-      raise ComponentIsNotRunning()
-
-if __name__ == "__main__":
-  FlumeHandler().execute()

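The structured output emitted by status() above relies only on the 'name' and 'status' keys of each process entry; an illustrative payload:

    json = {'processes': [{'name': 'a1', 'status': 'RUNNING'},
                          {'name': 'b1', 'status': 'NOT_RUNNING'}]}
    # With expected agents defined and 'b1' NOT_RUNNING, status() raises
    # ComponentIsNotRunning, which Ambari surfaces as a stopped component.
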
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/params.py
deleted file mode 100644
index 2997867..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/params.py
+++ /dev/null
@@ -1,80 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
-from resource_management import *
-
-config = Script.get_config()
-
-user_group = config['configurations']['cluster-env']['user_group']
-proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
-
-security_enabled = False
-
-hdp_stack_version = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(hdp_stack_version)
-
-#hadoop params
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
-  flume_bin = '/usr/hdp/current/flume-server/bin/flume-ng'
-  flume_hive_home = '/usr/hdp/current/hive-metastore'
-  flume_hcat_home = '/usr/hdp/current/hive-webhcat'
-else:
-  flume_bin = '/usr/bin/flume-ng'
-  flume_hive_home = '/usr/lib/hive'
-  flume_hcat_home = '/usr/lib/hive-hcatalog'
-
-flume_conf_dir = '/etc/flume/conf'
-java_home = config['hostLevelParams']['java_home']
-flume_log_dir = '/var/log/flume'
-flume_run_dir = '/var/run/flume'
-flume_user = 'flume'
-flume_group = 'flume'
-
-if 'flume-env' in config['configurations'] and 'flume_user' in config['configurations']['flume-env']:
-  flume_user = config['configurations']['flume-env']['flume_user']
-
-if (('flume-conf' in config['configurations']) and ('content' in config['configurations']['flume-conf'])):
-  flume_conf_content = config['configurations']['flume-conf']['content']
-else:
-  flume_conf_content = None
-
-if (('flume-log4j' in config['configurations']) and ('content' in config['configurations']['flume-log4j'])):
-  flume_log4j_content = config['configurations']['flume-log4j']['content']
-else:
-  flume_log4j_content = None
-
-targets = default('/commandParams/flume_handler', None)
-flume_command_targets = [] if targets is None else targets.split(',')
-
-flume_env_sh_template = config['configurations']['flume-env']['content']
-
-ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', [])
-ganglia_server_host = None
-if 0 != len(ganglia_server_hosts):
-  ganglia_server_host = ganglia_server_hosts[0]
-
-hostname = None
-if config.has_key('hostname'):
-  hostname = config['hostname']
-
-ams_collector_hosts = default("/clusterHostInfo/metric_collector_hosts", [])
-has_metric_collector = not len(ams_collector_hosts) == 0
-if has_metric_collector:
-  metric_collector_host = ams_collector_hosts[0]

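For example, a command targeted at two specific agents arrives as a comma-separated /commandParams/flume_handler value (agent names hypothetical):

    targets = 'a1,b1'   # default('/commandParams/flume_handler', None)
    flume_command_targets = [] if targets is None else targets.split(',')
    # => ['a1', 'b1']; when the value is absent, [] is returned and all
    # agents discovered in flume_conf_dir are used instead
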
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/templates/flume-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/templates/flume-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/templates/flume-metrics2.properties.j2
deleted file mode 100644
index 7458bf8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/templates/flume-metrics2.properties.j2
+++ /dev/null
@@ -1,22 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-collector={{metric_collector_host}}:8188
-collectionFrequency=60000
-maxRowCacheSize=10000
-sendInterval=59000
\ No newline at end of file

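Rendered, the template above produces a properties file like the following (collector host illustrative):

    collector=ams.example.com:8188
    collectionFrequency=60000
    maxRowCacheSize=10000
    sendInterval=59000
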
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/templates/flume.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/templates/flume.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/templates/flume.conf.j2
deleted file mode 100644
index 4dee67f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/templates/flume.conf.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-
-# flume.conf: Add your flume configuration here and start flume
-#             Note if you are using the Windows service or Unix service
-#             provided by the HDP distribution, they will assume the
-#             agent's name in this file to be 'a1'
-#
-{{flume_agent_conf_content}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/templates/log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/templates/log4j.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/templates/log4j.properties.j2
deleted file mode 100644
index 3b34db8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/templates/log4j.properties.j2
+++ /dev/null
@@ -1,67 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#  http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Define some default values that can be overridden by system properties.
-#
-# For testing, it may also be convenient to specify
-# -Dflume.root.logger=DEBUG,console when launching flume.
-
-#flume.root.logger=DEBUG,console
-flume.root.logger=INFO,LOGFILE
-flume.log.dir={{flume_log_dir}}
-flume.log.file=flume-{{agent_name}}.log
-
-log4j.logger.org.apache.flume.lifecycle = INFO
-log4j.logger.org.jboss = WARN
-log4j.logger.org.mortbay = INFO
-log4j.logger.org.apache.avro.ipc.NettyTransceiver = WARN
-log4j.logger.org.apache.hadoop = INFO
-
-# Define the root logger to the system property "flume.root.logger".
-log4j.rootLogger=${flume.root.logger}
-
-
-# Stock log4j rolling file appender
-# Default log rotation configuration
-log4j.appender.LOGFILE=org.apache.log4j.RollingFileAppender
-log4j.appender.LOGFILE.MaxFileSize=100MB
-log4j.appender.LOGFILE.MaxBackupIndex=10
-log4j.appender.LOGFILE.File=${flume.log.dir}/${flume.log.file}
-log4j.appender.LOGFILE.layout=org.apache.log4j.PatternLayout
-log4j.appender.LOGFILE.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss,SSS} %-5p [%t] (%C.%M:%L) %x - %m%n
-
-
-# Warning: If you enable the following appender it will fill up your disk if you don't have a cleanup job!
-# This uses the updated rolling file appender from log4j-extras that supports a reliable time-based rolling policy.
-# See http://logging.apache.org/log4j/companions/extras/apidocs/org/apache/log4j/rolling/TimeBasedRollingPolicy.html
-# Add "DAILY" to flume.root.logger above if you want to use this
-log4j.appender.DAILY=org.apache.log4j.rolling.RollingFileAppender
-log4j.appender.DAILY.rollingPolicy=org.apache.log4j.rolling.TimeBasedRollingPolicy
-log4j.appender.DAILY.rollingPolicy.ActiveFileName=${flume.log.dir}/${flume.log.file}
-log4j.appender.DAILY.rollingPolicy.FileNamePattern=${flume.log.dir}/${flume.log.file}.%d{yyyy-MM-dd}
-log4j.appender.DAILY.layout=org.apache.log4j.PatternLayout
-log4j.appender.DAILY.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss,SSS} %-5p [%t] (%C.%M:%L) %x - %m%n
-
-
-# console
-# Add "console" to flume.root.logger above if you want to use this
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d (%t) [%p - %l] %m%n

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/alerts.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/alerts.json
deleted file mode 100644
index 7605787..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/alerts.json
+++ /dev/null
@@ -1,137 +0,0 @@
-{
-  "GANGLIA": {
-    "service": [],
-    "GANGLIA_SERVER": [
-      {
-        "name": "ganglia_server_process",
-        "label": "Ganglia Server Process",
-        "description": "This host-level alert is triggered if the Ganglia server process cannot be established to be up and listening on the network.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "PORT",
-          "uri": "8651",
-          "default_port": 8651,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      },
-      {
-        "name": "ganglia_monitor_hdfs_namenode",
-        "label": "Ganglia NameNode Process Monitor",
-        "description": "This host-level alert is triggered if the Ganglia gmond process which handles receiving metrics for HDFS NameNode is not up and listening.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "PORT",
-          "uri": "8661",
-          "default_port": 8661,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      },
-      {
-        "name": "ganglia_monitor_hbase_master",
-        "label": "Ganglia HBase Master Process Monitor",
-        "description": "This host-level alert is triggered if the Ganglia gmond process which handles receiving metrics for the HBase Master process is not up and listening.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "PORT",
-          "uri": "8663",
-          "default_port": 8663,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      },
-      {
-        "name": "ganglia_monitor_yarn_resourcemanager",
-        "label": "Ganglia ResourceManager Process Monitor",
-        "description": "This host-level alert is triggered if the Ganglia gmond process which handles receiving metrics for the YARN ResourceManager process is not up and listening.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "PORT",
-          "uri": "8664",
-          "default_port": 8664,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      },
-      {
-        "name": "ganglia_monitor_mapreduce_history_server",
-        "label": "Ganglia History Server Process Monitor",
-        "description": "This host-level alert is triggered if the Ganglia gmond process which handles receiving metrics for the MapReduce History Server process is not up and listening.",
-        "interval": 1,
-        "scope": "ANY",
-        "enabled": true,
-        "source": {
-          "type": "PORT",
-          "uri": "8666",
-          "default_port": 8666,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      }
-    ]
-  }
-}
\ No newline at end of file

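All five PORT alerts above share the same reporting thresholds; a minimal sketch of how such thresholds classify a probe (this is not Ambari's alert runner):

    def classify(response_seconds, connected, warn=1.5, crit=5.0):
        # Map a TCP probe result onto the OK/WARNING/CRITICAL states above.
        if not connected or response_seconds >= crit:
            return 'CRITICAL'
        return 'WARNING' if response_seconds >= warn else 'OK'

    classify(0.003, True)   # 'OK'      -> "TCP OK - 0.003s response on port 8651"
    classify(2.0, True)     # 'WARNING'
    classify(6.0, False)    # 'CRITICAL'
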
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/configuration/ganglia-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/configuration/ganglia-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/configuration/ganglia-env.xml
deleted file mode 100644
index 3328acf..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/configuration/ganglia-env.xml
+++ /dev/null
@@ -1,77 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>ganglia_conf_dir</name>
-    <value>/etc/ganglia/hdp</value>
-    <description>Config directory for Ganglia</description>
-  </property>
-  <property>
-    <name>ganglia_runtime_dir</name>
-    <value>/var/run/ganglia/hdp</value>
-    <description>Run directories for Ganglia</description>
-  </property>
-  <property>
-    <name>gmetad_user</name>
-    <value>nobody</value>
-    <property-type>USER GROUP</property-type>
-    <description>User the gmetad process runs as</description>
-  </property>
-    <property>
-    <name>gmond_user</name>
-    <value>nobody</value>
-    <property-type>USER GROUP</property-type>
-    <description>User the gmond process runs as</description>
-  </property>
-  <property>
-    <name>rrdcached_base_dir</name>
-    <value>/var/lib/ganglia/rrds</value>
-    <description>Default directory for saving the rrd files on ganglia server</description>
-  </property>
-  <property>
-    <name>rrdcached_timeout</name>
-    <value>3600</value>
-    <description>(-w) Data is written to disk every timeout seconds. If this option is not specified the default interval of 300 seconds will be used.</description>
-  </property>
-  <property>
-    <name>rrdcached_flush_timeout</name>
-    <value>7200</value>
-      <description>(-f) Every timeout seconds the entire cache is searched for old values which are written to disk. This only concerns files to which updates have stopped, so setting this to a high value, such as 3600 seconds, is acceptable in most cases. This timeout defaults to 3600 seconds.</description>
-  </property>
-  <property>
-    <name>rrdcached_delay</name>
-    <value>1800</value>
-    <description>(-z) If specified, rrdcached will delay writing of each RRD for a random number of seconds in the range [0,delay). This will avoid too many writes being queued simultaneously. This value should be no greater than the value specified in -w. By default, there is no delay.</description>
-  </property>
-  <property>
-    <name>rrdcached_write_threads</name>
-    <value>4</value>
-    <description>(-t) Specifies the number of threads used for writing RRD files. The default is 4. Increasing this number will allow rrdcached to have more simultaneous I/O requests into the kernel. This may allow the kernel to re-order disk writes, resulting in better disk throughput.</description>
-  </property>
-  <property>
-    <name>additional_clusters</name>
-    <value> </value>
-    <description>Add additional desired Ganglia metrics clusters in the form "name1:port1,name2:port2". Ensure that the names and ports are unique across all clusters and that the ports are available on the Ganglia server host. Ambari has reserved ports 8667-8669 within its own pool.</description>
-  </property>
-
-</configuration>

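The rrdcached_* properties above correspond to the daemon flags named in their descriptions; assembled, the Ganglia server launches rrdcached roughly as follows (illustrative, flags abridged):

    rrdcached -w 3600 -f 7200 -z 1800 -t 4 -b /var/lib/ganglia/rrds
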
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/metainfo.xml
index 4e96ade..33bc837 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/metainfo.xml
@@ -20,108 +20,7 @@
   <services>
     <service>
       <name>GANGLIA</name>
-      <displayName>Ganglia</displayName>
-      <comment>Ganglia Metrics Collection system (&lt;a href=&quot;http://oss.oetiker.ch/rrdtool/&quot; target=&quot;_blank&quot;&gt;RRDTool&lt;/a&gt; will be installed too)</comment>
-      <version>3.5.0</version>
-      <components>
-        <component>
-          <name>GANGLIA_SERVER</name>
-          <displayName>Ganglia Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <commandScript>
-            <script>scripts/ganglia_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>GANGLIA_MONITOR</name>
-          <displayName>Ganglia Monitor</displayName>
-          <category>SLAVE</category>
-          <cardinality>ALL</cardinality>
-          <auto-deploy>
-            <enabled>true</enabled>
-          </auto-deploy>
-          <commandScript>
-            <script>scripts/ganglia_monitor.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>redhat5,redhat6,suse11</osFamily>
-          <packages>
-            <package>
-              <name>python-rrdtool-1.4.5</name>
-            </package>
-            <package>
-              <name>libganglia-3.5.0-99</name>
-            </package>
-            <package>
-              <name>ganglia-devel-3.5.0-99</name>
-            </package>
-            <package>
-              <name>ganglia-gmetad-3.5.0-99</name>
-            </package>
-            <package>
-              <name>ganglia-web-3.5.7-99.noarch</name>
-            </package>
-            <package>
-              <name>ganglia-gmond-3.5.0-99</name>
-            </package>
-            <package>
-              <name>ganglia-gmond-modules-python-3.5.0-99</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>ubuntu12</osFamily>
-          <packages>
-            <package>
-              <name>python-rrdtool</name>
-            </package>
-            <package>
-              <name>gmetad</name>
-            </package>
-            <package>
-              <name>ganglia-webfrontend</name>
-            </package>
-            <package>
-              <name>ganglia-monitor-python</name>
-            </package>
-            <package>
-              <name>rrdcached</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>suse11</osFamily>
-          <packages>
-            <package>
-              <name>apache2</name>
-            </package>
-            <package>
-              <name>apache2?mod_php*</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>redhat5,redhat6</osFamily>
-          <packages>
-            <package>
-              <name>httpd</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <configuration-dependencies>
-        <config-type>ganglia-env</config-type>
-      </configuration-dependencies>
-      <monitoringService>true</monitoringService>
+      <extends>common-services/GANGLIA/3.5.0</extends>
     </service>
   </services>
 </metainfo>

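With the refactor, the stack-level GANGLIA definition keeps only the pointer into common-services; components, packages, and configuration dependencies are inherited from the referenced definition. A stack that needs to diverge would redeclare just the overriding element next to the extends tag, e.g. (hypothetical override):

    <service>
      <name>GANGLIA</name>
      <extends>common-services/GANGLIA/3.5.0</extends>
      <!-- redeclared elements shadow the inherited ones -->
      <version>3.5.0-custom</version>
    </service>
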
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/checkGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/checkGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/checkGmetad.sh
deleted file mode 100644
index f45e371..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/checkGmetad.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-# Before checking gmetad, check rrdcached.
-./checkRrdcached.sh;
-
-gmetadRunningPid=`getGmetadRunningPid`;
-
-if [ -n "${gmetadRunningPid}" ]
-then
-  echo "${GMETAD_BIN} running with PID ${gmetadRunningPid}";
-else
-  echo "Failed to find running ${GMETAD_BIN}";
-  exit 1;
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/checkGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/checkGmond.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/checkGmond.sh
deleted file mode 100644
index fbf524a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/checkGmond.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function checkGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
-
-    # Skip over (purported) Clusters that don't have their core conf file present.
-    if [ -e "${gmondCoreConfFileName}" ]
-    then 
-      gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
-      if [ -n "${gmondRunningPid}" ]
-      then
-        echo "${GMOND_BIN} for cluster ${gmondClusterName} running with PID ${gmondRunningPid}";
-      else
-        echo "Failed to find running ${GMOND_BIN} for cluster ${gmondClusterName}";
-        exit 1;
-      fi
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so check
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        checkGmondForCluster ${gmondClusterName};
-    done
-else
-    # Just check the one ${gmondClusterName} that was asked for.
-    checkGmondForCluster ${gmondClusterName};
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/checkRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/checkRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/checkRrdcached.sh
deleted file mode 100644
index 1e0c2e2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/checkRrdcached.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-if [ -n "${rrdcachedRunningPid}" ]
-then
-  echo "${RRDCACHED_BIN} running with PID ${rrdcachedRunningPid}";
-else
-  echo "Failed to find running ${RRDCACHED_BIN}";
-  exit 1;
-fi

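The rrdcached check above follows the same pid-file liveness pattern as the gmond and gmetad checks. A standalone sketch of that pattern, with the pid-file path assumed for illustration:

    #!/usr/bin/env bash
    # Read the recorded PID, then confirm a live process still owns it.
    PID_FILE=/var/run/ganglia/rrdcached.pid    # assumed location
    pid=$(cat "${PID_FILE}" 2>/dev/null)
    if [ -n "${pid}" ] && kill -0 "${pid}" 2>/dev/null; then
        echo "rrdcached running with PID ${pid}"
    else
        echo "Failed to find running rrdcached"
        exit 1
    fi

The library helpers reach the same answer with ps -p plus awk/grep filtering; kill -0 is simply a terser way to ask whether a PID is alive and signalable.
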
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmetad.init
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmetad.init b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmetad.init
deleted file mode 100644
index 20b388e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmetad.init
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/sh
-# chkconfig: 2345 70 40
-# description: hdp-gmetad startup script
-# processname: hdp-gmetad
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
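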
-# Remember to keep this in sync with the definition of
-# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
-HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
-HDP_GANLIA_GMETAD_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmetad.sh
-HDP_GANLIA_GMETAD_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmetad.sh
-HDP_GANLIA_GMETAD_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmetad.sh
-
-RETVAL=0
-
-case "$1" in
-   start)
-      echo "============================="
-      echo "Starting hdp-gmetad..."
-      echo "============================="
-      [ -f ${HDP_GANLIA_GMETAD_STARTER} ] || exit 1
-      eval "${HDP_GANLIA_GMETAD_STARTER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmetad
-      ;;
-
-  stop)
-      echo "=================================="
-      echo "Shutting down hdp-gmetad..."
-      echo "=================================="
-      [ -f ${HDP_GANLIA_GMETAD_STOPPER} ] || exit 1
-      eval "${HDP_GANLIA_GMETAD_STOPPER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmetad
-      ;;
-
-  restart|reload)
-   	$0 stop
-   	$0 start
-   	RETVAL=$?
-	;;
-  status)
-      echo "======================================="
-      echo "Checking status of hdp-gmetad..."
-      echo "======================================="
-      [ -f ${HDP_GANLIA_GMETAD_CHECKER} ] || exit 1
-      eval "${HDP_GANLIA_GMETAD_CHECKER}"
-      RETVAL=$?
-      ;;
-  *)
-	echo "Usage: $0 {start|stop|restart|status}"
-	exit 1
-esac
-
-exit $RETVAL

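The "# chkconfig: 2345 70 40" header in the init script above is what lets chkconfig wire hdp-gmetad into runlevels 2-5 with start priority 70 and stop priority 40. A hedged sketch of registering and exercising it on a RHEL-style host (paths illustrative; in practice the Ganglia package scripts, not an operator, would handle this):

    cp gmetad.init /etc/init.d/hdp-gmetad
    chmod +x /etc/init.d/hdp-gmetad
    chkconfig --add hdp-gmetad    # reads the 2345 70 40 header
    service hdp-gmetad start
    service hdp-gmetad status     # dispatches to checkGmetad.sh
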
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmetadLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmetadLib.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmetadLib.sh
deleted file mode 100644
index 6a24bed..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/files/gmetadLib.sh
+++ /dev/null
@@ -1,204 +0,0 @@
-#!/usr/bin/env bash
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-GMETAD_BIN=/usr/sbin/gmetad;
-GMETAD_CONF_FILE=${GANGLIA_CONF_DIR}/gmetad.conf;
-GMETAD_PID_FILE=${GANGLIA_RUNTIME_DIR}/gmetad.pid;
-
-function getGmetadLoggedPid()
-{
-    if [ -e "${GMETAD_PID_FILE}" ]
-    then
-        echo `cat ${GMETAD_PID_FILE}`;
-    fi
-}
-
-function getGmetadRunningPid()
-{
-    gmetadLoggedPid=`getGmetadLoggedPid`;
-
-    if [ -n "${gmetadLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${gmetadLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}
-
-function generateGmetadConf()
-{
-    now=`date`;
-
-    cat <<END_OF_GMETAD_CONF_1
-#################### Generated by ${0} on ${now} ####################
-#
-#-------------------------------------------------------------------------------
-# Setting the debug_level to 1 will keep the daemon in the foreground and
-# show only error messages. Setting this value higher than 1 will make
-# gmetad output debugging information and stay in the foreground.
-# default: 0
-# debug_level 10
-#
-#-------------------------------------------------------------------------------
-# What to monitor. The most important section of this file. 
-#
-# The data_source tag specifies either a cluster or a grid to
-# monitor. If we detect the source is a cluster, we will maintain a complete
-# set of RRD databases for it, which can be used to create historical 
-# graphs of the metrics. If the source is a grid (it comes from another gmetad),
-# we will only maintain summary RRDs for it.
-#
-# Format: 
-# data_source "my cluster" [polling interval] address1:port addreses2:port ...
-# 
-# The keyword 'data_source' must immediately be followed by a unique
-# string which identifies the source, then an optional polling interval in 
-# seconds. The source will be polled at this interval on average. 
-# If the polling interval is omitted, 15sec is assumed.
-#
-# If you choose to set the polling interval to something other than the default,
-# note that the web frontend determines a host as down if its TN value is less
-# than 4 * TMAX (20sec by default).  Therefore, if you set the polling interval
-# to something around or greater than 80sec, this will cause the frontend to
-# incorrectly display hosts as down even though they are not.
-#
-# A list of machines which service the data source follows, in the 
-# format ip:port, or name:port. If a port is not specified then 8649
-# (the default gmond port) is assumed.
-# default: There is no default value
-#
-# data_source "my cluster" 10 localhost  my.machine.edu:8649  1.2.3.5:8655
-# data_source "my grid" 50 1.3.4.7:8655 grid.org:8651 grid-backup.org:8651
-# data_source "another source" 1.3.4.7:8655  1.3.4.8
-END_OF_GMETAD_CONF_1
-
-    # Get info about all the configured Ganglia clusters.
-    getGangliaClusterInfo | while read gangliaClusterInfoLine
-    do
-        # From each, parse out ${gmondClusterName}, ${gmondMasterIP} and ${gmondPort}... 
-        read gmondClusterName gmondMasterIP gmondPort <<<`echo ${gangliaClusterInfoLine}`;
-        # ...and generate a corresponding data_source line for gmetad.conf. 
-        echo "data_source \"${gmondClusterName}\" ${gmondMasterIP}:${gmondPort}";
-    done
-
-    cat <<END_OF_GMETAD_CONF_2
-#
-# Round-Robin Archives
-# You can specify custom Round-Robin archives here (defaults are listed below)
-#
-# Old Default RRA: Keep 1 hour of metrics at 15 second resolution. 1 day at 6 minute
-# RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
-#      "RRA:AVERAGE:0.5:5760:374"
-# New Default RRA
-# Keep 5856 data points at 15 second resolution assuming 15 second (default) polling. That's 1 day
-# Two weeks of data points at 1 minute resolution (average)
-#RRAs "RRA:AVERAGE:0.5:1:5856" "RRA:AVERAGE:0.5:4:20160" "RRA:AVERAGE:0.5:40:52704"
-# Retaining existing resolution
-RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
-     "RRA:AVERAGE:0.5:5760:374"
-#
-#-------------------------------------------------------------------------------
-# Scalability mode. If on, we summarize over downstream grids, and respect
-# authority tags. If off, we take on 2.5.0-era behavior: we do not wrap our output
-# in <GRID></GRID> tags, we ignore all <GRID> tags we see, and always assume
-# we are the "authority" on data source feeds. This approach does not scale to
-# large groups of clusters, but is provided for backwards compatibility.
-# default: on
-# scalable off
-#
-#-------------------------------------------------------------------------------
-# The name of this Grid. All the data sources above will be wrapped in a GRID
-# tag with this name.
-# default: unspecified
-gridname "HDP_GRID"
-#
-#-------------------------------------------------------------------------------
-# The authority URL for this grid. Used by other gmetads to locate graphs
-# for our data sources. Generally points to a ganglia/
-# website on this machine.
-# default: "http://hostname/ganglia/",
-#   where hostname is the name of this machine, as defined by gethostname().
-# authority "http://mycluster.org/newprefix/"
-#
-#-------------------------------------------------------------------------------
-# List of machines this gmetad will share XML with. Localhost
-# is always trusted. 
-# default: There is no default value
-# trusted_hosts 127.0.0.1 169.229.50.165 my.gmetad.org
-#
-#-------------------------------------------------------------------------------
-# If you want any host which connects to the gmetad XML to receive
-# data, then set this value to "on"
-# default: off
-# all_trusted on
-#
-#-------------------------------------------------------------------------------
-# If you don't want gmetad to setuid then set this to off
-# default: on
-# setuid off
-#
-#-------------------------------------------------------------------------------
-# User gmetad will setuid to (defaults to "nobody")
-# default: "nobody"
-setuid_username "${GMETAD_USER}"
-#
-#-------------------------------------------------------------------------------
-# Umask to apply to created rrd files and grid directory structure
-# default: 0 (files are public)
-# umask 022
-#
-#-------------------------------------------------------------------------------
-# The port gmetad will answer requests for XML
-# default: 8651
-# xml_port 8651
-#
-#-------------------------------------------------------------------------------
-# The port gmetad will answer queries for XML. This facility allows
-# simple subtree and summation views of the XML tree.
-# default: 8652
-# interactive_port 8652
-#
-#-------------------------------------------------------------------------------
-# The number of threads answering XML requests
-# default: 4
-# server_threads 10
-#
-#-------------------------------------------------------------------------------
-# Where gmetad stores its round-robin databases
-# default: "/var/lib/ganglia/rrds"
-rrd_rootdir "${RRD_ROOTDIR}"
-#
-#-------------------------------------------------------------------------------
-# In earlier versions of gmetad, hostnames were handled in a
-# case-sensitive manner.
-# If your hostname directories have been renamed to lower case,
-# set this option to 0 to disable backward compatibility.
-# From version 3.2, backwards compatibility will be disabled by default.
-# default: 1   (for gmetad < 3.2)
-# default: 0   (for gmetad >= 3.2)
-case_sensitive_hostnames 1
-END_OF_GMETAD_CONF_2
-}

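In gmetadLib.sh above, generateGmetadConf emits a complete gmetad.conf on stdout, one data_source line per cluster reported by getGangliaClusterInfo. A minimal sketch of a caller, such as a start script, using the constants the library itself defines:

    #!/usr/bin/env bash
    # Assumed to run from the Ganglia files directory so the source works.
    source ./gmetadLib.sh
    generateGmetadConf > "${GMETAD_CONF_FILE}"    # ${GANGLIA_CONF_DIR}/gmetad.conf

A cluster registered as HDPSlaves with master 10.0.0.1 on port 8660 would then yield the line:

    data_source "HDPSlaves" 10.0.0.1:8660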

[16/37] ambari git commit: AMBARI-8745: Common Services: Refactor HDP2.0.6 FLUME, GANGLIA, HBASE services (Jayush Luniya)

Posted by jl...@apache.org.
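
The metrics.json removed below maps Ambari metric paths (the JSON keys) to source metric names, with the pointInTime/temporal flags marking whether each metric supports point-in-time and time-range queries. A hedged sketch of pulling one mapped RegionServer metric through the Ambari REST API (server address, cluster name, and credentials are all illustrative):

    curl -u admin:admin \
      "http://ambari-server:8080/api/v1/clusters/c1/services/HBASE/components/HBASE_REGIONSERVER?fields=metrics/hbase/regionserver/compactionQueueSize"

Ambari resolves the metrics/... path through this file to the underlying name (here regionserver.Server.compactionQueueLength) before querying the metrics provider.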
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/metrics.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/metrics.json
deleted file mode 100644
index 50f9c42..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/metrics.json
+++ /dev/null
@@ -1,13635 +0,0 @@
-{
-  "HBASE_REGIONSERVER": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "metrics/hbase/regionserver/compactionTime_avg_time": {
-            "metric": "hbase.regionserver.compactionTime_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/closeRegion_num_ops": {
-            "metric": "rpc.rpc.closeRegion_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/mutationsWithoutWALSize": {
-            "metric": "regionserver.Server.mutationsWithoutWALSize",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/unassign_num_ops": {
-            "metric": "rpc.rpc.unassign_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/modifyTable_num_ops": {
-            "metric": "rpc.rpc.modifyTable_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getProtocolVersion_avg_time": {
-            "metric": "rpc.rpc.getProtocolVersion_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getZooKeeper/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getZooKeeper.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load/load_one": {
-            "metric": "load_one",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getClosestRowBefore_num_ops": {
-            "metric": "rpc.rpc.getClosestRowBefore_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/slowAppendCount": {
-            "metric": "regionserver.Server.slowAppendCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/getClosestRowBefore/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getClosestRowBefore.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/lockRow_num_ops": {
-            "metric": "rpc.rpc.lockRow_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/flushRegion_avg_time": {
-            "metric": "rpc.rpc.flushRegion_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_total": {
-            "metric": "swap_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/stopMaster_num_ops": {
-            "metric": "rpc.rpc.stopMaster_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openRegions/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.openRegions.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/balance_avg_time": {
-            "metric": "rpc.rpc.balance_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/process/proc_total": {
-            "metric": "proc_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/part_max_used": {
-            "metric": "part_max_used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/modifyColumn_avg_time": {
-            "metric": "rpc.rpc.modifyColumn_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/multi/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.multi.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/rootIndexSizeKB": {
-            "metric": "hbase.regionserver.rootIndexSizeKB",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getZooKeeper_num_ops": {
-            "metric": "rpc.rpc.getZooKeeper_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/blockCacheCount": {
-            "metric": "regionserver.Server.blockCacheCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/flushRegion_num_ops": {
-            "metric": "rpc.rpc.flushRegion_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rollHLogWriter/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.rollHLogWriter.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.removeFromOnlineRegions.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_std_dev": {
-            "metric": "hbase.regionserver.putRequestLatency_std_dev",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_std_dev": {
-            "metric": "hbase.regionserver.getRequestLatency_std_dev",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/get_num_ops": {
-            "metric": "rpc.rpc.get_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/stopMaster_avg_time": {
-            "metric": "rpc.rpc.stopMaster_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getOnlineRegions/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getOnlineRegions.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_in": {
-            "metric": "bytes_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/removeFromOnlineRegions_num_ops": {
-            "metric": "rpc.rpc.removeFromOnlineRegions_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/ping_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.ping_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openScanner/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.openScanner.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getRegionInfo_avg_time": {
-            "metric": "rpc.rpc.getRegionInfo_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/lockRow_avg_time": {
-            "metric": "rpc.rpc.lockRow_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/commitPending_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.commitPending_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/checkOOME_num_ops": {
-            "metric": "rpc.rpc.checkOOME_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/reportRSFatalError_num_ops": {
-            "metric": "rpc.rpc.reportRSFatalError_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getConfiguration/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getConfiguration.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/reportRSFatalError_avg_time": {
-            "metric": "rpc.rpc.reportRSFatalError_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_in": {
-            "metric": "pkts_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_total": {
-            "metric": "mem_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "jvm.JvmMetrics.ThreadsRunnable",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/unlockRow/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.unlockRow.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_min": {
-            "metric": "regionserver.Server.Delete_min",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "jvm.JvmMetrics.ThreadsNew",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getClusterStatus_num_ops": {
-            "metric": "rpc.rpc.getClusterStatus_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getHTableDescriptors_avg_time": {
-            "metric": "rpc.rpc.getHTableDescriptors_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "rpc.rpc.rpcAuthorizationFailures",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/deleteColumn_num_ops": {
-            "metric": "rpc.rpc.deleteColumn_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/delete/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.delete.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/increment_num_ops": {
-            "metric": "rpc.rpc.increment_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getMapCompletionEvents_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.getMapCompletionEvents_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/stop/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.stop.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/modifyColumn_num_ops": {
-            "metric": "rpc.rpc.modifyColumn_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/checkOOME_avg_time": {
-            "metric": "rpc.rpc.checkOOME_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/next/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.next.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcSlowResponse_avg_time": {
-            "metric": "rpc.rpc.RpcSlowResponse_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getConfiguration_avg_time": {
-            "metric": "rpc.rpc.getConfiguration_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getServerName/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getServerName.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_nice": {
-            "metric": "cpu_nice",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/unassign_avg_time": {
-            "metric": "rpc.rpc.unassign_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "rpc.rpc.NumOpenConnections",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/delete/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.delete.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/canCommit_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.canCommit_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/multi/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.multi.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_out": {
-            "metric": "bytes_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load/load_five": {
-            "metric": "load_five",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_75th_percentile": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_75th_percentile",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_num_ops": {
-            "metric": "regionserver.Server.Delete_num_ops",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/boottime": {
-            "metric": "boottime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/compactRegion_avg_time": {
-            "metric": "rpc.rpc.compactRegion_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_num_ops": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/writeRequestsCount": {
-            "metric": "regionserver.Server.writeRequestCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/execCoprocessor_num_ops": {
-            "metric": "rpc.rpc.execCoprocessor_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/canCommit_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.canCommit_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getHServerInfo/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getHServerInfo.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_min": {
-            "metric": "regionserver.Server.Get_min",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/getCatalogTracker/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getCatalogTracker.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/incrementColumnValue_avg_time": {
-            "metric": "rpc.rpc.incrementColumnValue_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/deleteTable_num_ops": {
-            "metric": "rpc.rpc.deleteTable_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logError": {
-            "metric": "jvm.JvmMetrics.LogError",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops": {
-            "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_75th_percentile": {
-            "metric": "regionserver.Server.Mutate_75th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/getFromOnlineRegions/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getFromOnlineRegions.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getRegionInfo/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getRegionInfo.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/blockCacheHitCount": {
-            "metric": "regionserver.Server.blockCacheHitCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/exists_avg_time": {
-            "metric": "rpc.rpc.exists_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/slowPutCount": {
-            "metric": "regionserver.Server.slowPutCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatency_num_ops": {
-            "metric": "hbase.regionserver.fsWriteLatency_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getOnlineRegions/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getOnlineRegions.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/exists/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.exists.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/replicateLogEntries/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.replicateLogEntries.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/delete_num_ops": {
-            "metric": "rpc.rpc.delete_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/exists_num_ops": {
-            "metric": "rpc.rpc.exists_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/regionServerStartup_avg_time": {
-            "metric": "rpc.rpc.regionServerStartup_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/checkAndDelete_num_ops": {
-            "metric": "rpc.rpc.checkAndDelete_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/closeRegion_avg_time": {
-            "metric": "rpc.rpc.closeRegion_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getBlockLocalPathInfo_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.getBlockLocalPathInfo_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getProtocolSignature_avg_time": {
-            "metric": "rpc.rpc.getProtocolSignature_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/assign_avg_time": {
-            "metric": "rpc.rpc.assign_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/compactionSize_num_ops": {
-            "metric": "hbase.regionserver.compactionSize_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/close_avg_time": {
-            "metric": "rpc.rpc.close_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/blockCacheSize": {
-            "metric": "regionserver.Server.blockCacheSize",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_num_ops": {
-            "metric": "regionserver.Server.Mutate_num_ops",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "jvm.JvmMetrics.ThreadsBlocked",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getHServerInfo_num_ops": {
-            "metric": "rpc.rpc.getHServerInfo_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/stop_avg_time": {
-            "metric": "rpc.rpc.stop_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isStopped_num_ops": {
-            "metric": "rpc.rpc.isStopped_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_median": {
-            "metric": "regionserver.Server.Mutate_median",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_num_ops": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_median": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_median",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isMasterRunning_avg_time": {
-            "metric": "rpc.rpc.isMasterRunning_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/incrementColumnValue_num_ops": {
-            "metric": "rpc.rpc.incrementColumnValue_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_std_dev": {
-            "metric": "hbase.regionserver.deleteRequestLatency_std_dev",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/bulkLoadHFiles/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.bulkLoadHFiles.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/hdfsBlocksLocalityIndex": {
-            "metric": "hbase.regionserver.hdfsBlocksLocalityIndex",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/readRequestsCount": {
-            "metric": "regionserver.Server.readRequestCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_free": {
-            "metric": "mem_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/execCoprocessor/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.execCoprocessor.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_min": {
-            "metric": "regionserver.Server.Mutate_min",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/storefileIndexSizeMB": {
-            "metric": "regionserver.Server.storeFileIndexSize",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/assign_num_ops": {
-            "metric": "rpc.rpc.assign_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/close/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.close.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_median": {
-            "metric": "regionserver.Server.Delete_median",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/enableTable_avg_time": {
-            "metric": "rpc.rpc.enableTable_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_mean": {
-            "metric": "regionserver.Server.Mutate_mean",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/incrementColumnValue/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.incrementColumnValue.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_out": {
-            "metric": "pkts_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/close_num_ops": {
-            "metric": "rpc.rpc.close_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_cached": {
-            "metric": "mem_cached",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getConfiguration/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getConfiguration.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/done_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.done_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/compactionSize_avg_time": {
-            "metric": "hbase.regionserver.compactionSize_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getFromOnlineRegions_avg_time": {
-            "metric": "rpc.rpc.getFromOnlineRegions_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_min": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_min",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/increment/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.increment.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/deleteTable_avg_time": {
-            "metric": "rpc.rpc.deleteTable_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/put/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.put.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/delete_avg_time": {
-            "metric": "rpc.rpc.delete_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/statusUpdate_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.statusUpdate_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openRegions/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.openRegions.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/compactRegion/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.compactRegion.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationFailures": {
-            "metric": "rpc.rpc.rpcAuthenticationFailures",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openScanner/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.openScanner.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getClusterStatus_avg_time": {
-            "metric": "rpc.rpc.getClusterStatus_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/unlockRow/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.unlockRow.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/removeFromOnlineRegions_avg_time": {
-            "metric": "rpc.rpc.removeFromOnlineRegions_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/put/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.put.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/modifyTable_avg_time": {
-            "metric": "rpc.rpc.modifyTable_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/checkAndPut_avg_time": {
-            "metric": "rpc.rpc.checkAndPut_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isStopped/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.isStopped.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/put_avg_time": {
-            "metric": "rpc.rpc.put_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/blockCacheHitRatio": {
-            "metric": "hbase.regionserver.blockCacheHitRatio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/createTable_avg_time": {
-            "metric": "rpc.rpc.createTable_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getRegionInfo/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getRegionInfo.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_std_dev": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_std_dev",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getHTableDescriptors_num_ops": {
-            "metric": "rpc.rpc.getHTableDescriptors_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getAlterStatus_avg_time": {
-            "metric": "rpc.rpc.getAlterStatus_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getRegionInfo_num_ops": {
-            "metric": "rpc.rpc.getRegionInfo_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/statusUpdate_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.statusUpdate_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/compactRegion_num_ops": {
-            "metric": "rpc.rpc.compactRegion_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isAborted_num_ops": {
-            "metric": "rpc.rpc.isAborted_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getFromOnlineRegions/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getFromOnlineRegions.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_max": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_max",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/blockCacheEvictedCount": {
-            "metric": "regionserver.Server.blockCacheEvictionCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/checkOOME/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.checkOOME.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_buffers": {
-            "metric": "mem_buffers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/disableTable_num_ops": {
-            "metric": "rpc.rpc.disableTable_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openScanner_num_ops": {
-            "metric": "rpc.rpc.openScanner_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/bulkLoadHFiles/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.bulkLoadHFiles.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_user": {
-            "metric": "cpu_user",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_free": {
-            "metric": "swap_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/regionServerReport_num_ops": {
-            "metric": "rpc.rpc.regionServerReport_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openRegions_avg_time": {
-            "metric": "rpc.rpc.openRegions_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/exists/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.exists.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_99th_percentile": {
-            "metric": "regionserver.Server.Mutate_99th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/load/load_fifteen": {
-            "metric": "load_fifteen",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/execCoprocessor/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.execCoprocessor.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isMasterRunning_num_ops": {
-            "metric": "rpc.rpc.isMasterRunning_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/balanceSwitch_num_ops": {
-            "metric": "rpc.rpc.balanceSwitch_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/offline_num_ops": {
-            "metric": "rpc.rpc.offline_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_max": {
-            "metric": "regionserver.Server.Get_max",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_shared": {
-            "metric": "mem_shared",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/abort_num_ops": {
-            "metric": "rpc.rpc.abort_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_95th_percentile": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_95th_percentile",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/blockCacheHitCachingRatio": {
-            "metric": "hbase.regionserver.blockCacheHitCachingRatio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_num": {
-            "metric": "cpu_num",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rollHLogWriter_num_ops": {
-            "metric": "rpc.rpc.rollHLogWriter_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openRegions_num_ops": {
-            "metric": "rpc.rpc.openRegions_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "jvm.JvmMetrics.LogFatal",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/splitRegion_avg_time": {
-            "metric": "rpc.rpc.splitRegion_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/closeRegion/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.closeRegion.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/checkAndPut/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.checkAndPut.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_99th_percentile": {
-            "metric": "regionserver.Server.Get_99th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_min": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_min",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/splitRegion/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.splitRegion.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_std_dev": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_std_dev",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile": {
-            "metric": "regionserver.Server.Delete_99th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/getHServerInfo/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getHServerInfo.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_max": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_max",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getTask_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.getTask_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/replicateLogEntries_num_ops": {
-            "metric": "rpc.rpc.replicateLogEntries_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/multi_avg_time": {
-            "metric": "rpc.rpc.multi_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/slowIncrementCount": {
-            "metric": "regionserver.Server.slowIncrementCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_95th_percentile": {
-            "metric": "regionserver.Server.Mutate_95th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/compactionQueueSize": {
-            "metric": "regionserver.Server.compactionQueueLength",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/getCatalogTracker_avg_time": {
-            "metric": "rpc.rpc.getCatalogTracker_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/splitRegion_num_ops": {
-            "metric": "rpc.rpc.splitRegion_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/balance_num_ops": {
-            "metric": "rpc.rpc.balance_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/flushTime_num_ops": {
-            "metric": "hbase.regionserver.flushTime_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/shutdown_num_ops": {
-            "metric": "rpc.rpc.shutdown_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatency_num_ops": {
-            "metric": "hbase.regionserver.fsReadLatency_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isAborted/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.isAborted.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_idle": {
-            "metric": "cpu_idle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_75th_percentile": {
-            "metric": "regionserver.Server.Get_75th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getServerName_avg_time": {
-            "metric": "rpc.rpc.getServerName_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/compactionTime_num_ops": {
-            "metric": "hbase.regionserver.compactionTime_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/abort_avg_time": {
-            "metric": "rpc.rpc.abort_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getCatalogTracker/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getCatalogTracker.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getBlockLocalPathInfo_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.getBlockLocalPathInfo_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/enableTable_num_ops": {
-            "metric": "rpc.rpc.enableTable_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/lockRow/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.lockRow.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/stores": {
-            "metric": "regionserver.Server.storeCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/addColumn_avg_time": {
-            "metric": "rpc.rpc.addColumn_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/addToOnlineRegions/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.addToOnlineRegions.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "ugi.UgiMetrics.LoginFailureNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_wio": {
-            "metric": "cpu_wio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getZooKeeper/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getZooKeeper.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getServerName_num_ops": {
-            "metric": "rpc.rpc.getServerName_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getServerName/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getServerName.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationSuccesses": {
-            "metric": "rpc.rpc.rpcAuthenticationSuccesses",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isStopped/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.isStopped.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/disableTable_avg_time": {
-            "metric": "rpc.rpc.disableTable_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rollHLogWriter/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.rollHLogWriter.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/abort/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.abort.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openRegion_avg_time": {
-            "metric": "rpc.rpc.openRegion_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/regionServerReport_avg_time": {
-            "metric": "rpc.rpc.regionServerReport_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getAlterStatus_num_ops": {
-            "metric": "rpc.rpc.getAlterStatus_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/flushRegion/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.flushRegion.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/incrementColumnValue/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.incrementColumnValue.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/next_avg_time": {
-            "metric": "rpc.rpc.next_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_num_ops": {
-            "metric": "regionserver.Server.Get_num_ops",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "rpc.rpc.ReceivedBytes",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/bulkLoadHFiles_num_ops": {
-            "metric": "rpc.rpc.bulkLoadHFiles_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/ping_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.ping_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatency_avg_time": {
-            "metric": "hbase.regionserver.fsReadLatency_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/flushSize_num_ops": {
-            "metric": "hbase.regionserver.flushSize_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "jvm.JvmMetrics.GcTimeMillis",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "jvm.JvmMetrics.ThreadsTerminated",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.checkAndDelete.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/balanceSwitch_avg_time": {
-            "metric": "rpc.rpc.balanceSwitch_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_max": {
-            "metric": "regionserver.Server.Mutate_max",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/openRegion/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.openRegion.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/lockRow/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.lockRow.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "rpc.rpc.callQueueLen",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openRegion_num_ops": {
-            "metric": "rpc.rpc.openRegion_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/compactRegion/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.compactRegion.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsSyncLatency_num_ops": {
-            "metric": "hbase.regionserver.fsSyncLatency_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_95th_percentile": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_95th_percentile",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getOnlineRegions_avg_time": {
-            "metric": "rpc.rpc.getOnlineRegions_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_75th_percentile": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_75th_percentile",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/move_num_ops": {
-            "metric": "rpc.rpc.move_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/stop_num_ops": {
-            "metric": "rpc.rpc.stop_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/replicateLogEntries_avg_time": {
-            "metric": "rpc.rpc.replicateLogEntries_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_mean": {
-            "metric": "regionserver.Server.Get_mean",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/get_avg_time": {
-            "metric": "rpc.rpc.get_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/multi_num_ops": {
-            "metric": "rpc.rpc.multi_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/next/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.next.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/addToOnlineRegions_avg_time": {
-            "metric": "rpc.rpc.addToOnlineRegions_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/deleteColumn_avg_time": {
-            "metric": "rpc.rpc.deleteColumn_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/regions": {
-            "metric": "regionserver.Server.regionCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/bulkLoadHFiles_avg_time": {
-            "metric": "rpc.rpc.bulkLoadHFiles_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isAborted/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.isAborted.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/stop/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.stop.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/addToOnlineRegions_num_ops": {
-            "metric": "rpc.rpc.addToOnlineRegions_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/abort/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.abort.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.removeFromOnlineRegions.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/blockCacheFree": {
-            "metric": "regionserver.Server.blockCacheFreeSize",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/offline_avg_time": {
-            "metric": "rpc.rpc.offline_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/unlockRow_avg_time": {
-            "metric": "rpc.rpc.unlockRow_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/blockCacheMissCount": {
-            "metric": "regionserver.Server.blockCacheMissCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/getCatalogTracker_num_ops": {
-            "metric": "rpc.rpc.getCatalogTracker_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/checkOOME/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.checkOOME.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/flushQueueSize": {
-            "metric": "regionserver.Server.flushQueueLength",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/checkAndPut/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.checkAndPut.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/close/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.close.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/execCoprocessor_avg_time": {
-            "metric": "rpc.rpc.execCoprocessor_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_mean": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_mean",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/createTable_num_ops": {
-            "metric": "rpc.rpc.createTable_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/process/proc_run": {
-            "metric": "proc_run",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getConfiguration_num_ops": {
-            "metric": "rpc.rpc.getConfiguration_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isStopped_avg_time": {
-            "metric": "rpc.rpc.isStopped_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "rpc.rpc.RpcQueueTimeNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rollHLogWriter_avg_time": {
-            "metric": "rpc.rpc.rollHLogWriter_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsSyncLatency_avg_time": {
-            "metric": "hbase.regionserver.fsSyncLatency_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_mean": {
-            "metric": "regionserver.Server.Delete_mean",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/addToOnlineRegions/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.addToOnlineRegions.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getMapCompletionEvents_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.getMapCompletionEvents_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_aidle": {
-            "metric": "cpu_aidle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_mean": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_mean",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/totalStaticIndexSizeKB": {
-            "metric": "regionserver.Server.staticIndexSize",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/checkAndDelete/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.checkAndDelete.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getFromOnlineRegions_num_ops": {
-            "metric": "rpc.rpc.getFromOnlineRegions_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/mutationsWithoutWALCount": {
-            "metric": "regionserver.Server.mutationsWithoutWALCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/disk_total": {
-            "metric": "disk_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/get/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.get.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_median": {
-            "metric": "regionserver.Server.Get_median",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/openScanner_avg_time": {
-            "metric": "rpc.rpc.openScanner_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcSlowResponse_num_ops": {
-            "metric": "rpc.rpc.RpcSlowResponse_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/splitRegion/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.splitRegion.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isAborted_avg_time": {
-            "metric": "rpc.rpc.isAborted_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getClosestRowBefore/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getClosestRowBefore.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/flushRegion/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.flushRegion.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/flushSize_avg_time": {
-            "metric": "hbase.regionserver.flushSize_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/commitPending_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.commitPending_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getClosestRowBefore_avg_time": {
-            "metric": "rpc.rpc.getClosestRowBefore_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_speed": {
-            "metric": "cpu_speed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_max": {
-            "metric": "regionserver.Server.Delete_max",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/get/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.get.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/put_num_ops": {
-            "metric": "rpc.rpc.put_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/move_avg_time": {
-            "metric": "rpc.rpc.move_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/percentFilesLocal": {
-            "metric": "regionserver.Server.percentFilesLocal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatency_avg_time": {
-            "metric": "hbase.regionserver.fsWriteLatency_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/increment/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.increment.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openRegion/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.openRegion.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "rpc.rpc.SentBytes",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getTask_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.getTask_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/addColumn_num_ops": {
-            "metric": "rpc.rpc.addColumn_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "jvm.JvmMetrics.LogWarn",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/maxMemoryM": {
-            "metric": "jvm.metrics.maxMemoryM",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "jvm.JvmMetrics.GcCount",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getOnlineRegions_num_ops": {
-            "metric": "rpc.rpc.getOnlineRegions_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/flushTime_avg_time": {
-            "metric": "hbase.regionserver.flushTime_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/done_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.done_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getProtocolVersion_num_ops": {
-            "metric": "rpc.rpc.getProtocolVersion_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemHeapUsedM",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/unlockRow_num_ops": {
-            "metric": "rpc.rpc.unlockRow_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/disk_free": {
-            "metric": "disk_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsWaiting",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/slowGetCount": {
-            "metric": "regionserver.Server.slowGetCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/replicateLogEntries/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.replicateLogEntries.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/shutdown_avg_time": {
-            "metric": "rpc.rpc.shutdown_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/regionServerStartup_num_ops": {
-            "metric": "rpc.rpc.regionServerStartup_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_system": {
-            "metric": "cpu_system",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/requests": {
-            "metric": "regionserver.Server.totalRequestCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_99th_percentile": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_99th_percentile",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_99th_percentile": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_99th_percentile",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/storefiles": {
-            "metric": "regionserver.Server.storeFileCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/next_num_ops": {
-            "metric": "rpc.rpc.next_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time": {
-            "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/slowDeleteCount": {
-            "metric": "regionserver.Server.slowDeleteCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/checkAndDelete_avg_time": {
-            "metric": "rpc.rpc.checkAndDelete_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/closeRegion/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.closeRegion.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getHServerInfo_avg_time": {
-            "metric": "rpc.rpc.getHServerInfo_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "jvm.JvmMetrics.LogInfo",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getZooKeeper_avg_time": {
-            "metric": "rpc.rpc.getZooKeeper_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/hlogFileCount": {
-            "metric": "hbase.regionserver.hlogFileCount",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_95th_percentile": {
-            "metric": "regionserver.Server.Get_95th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_95th_percentile": {
-            "metric": "regionserver.Server.Delete_95th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/memstoreSize": {
-            "metric": "regionserver.Server.memStoreSize",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_median": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_median",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getProtocolSignature_num_ops": {
-            "metric": "rpc.rpc.getProtocolSignature_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_75th_percentile": {
-            "metric": "regionserver.Server.Delete_75th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "rpc.rpc.rpcAuthorizationSuccesses",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/totalStaticBloomSizeKB": {
-            "metric": "regionserver.Server.staticBloomSize",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/checkAndPut_num_ops": {
-            "metric": "rpc.rpc.checkAndPut_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/increment_avg_time": {
-            "metric": "rpc.rpc.increment_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "metrics/hbase/regionserver/slowPutCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowPutCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/percentFilesLocal": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.percentFilesLocal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_min": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_min",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/blockCacheFree": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheFreeSize",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/mutationsWithoutWALSize": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALSize",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/blockCacheMissCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheMissCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/flushQueueSize": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.flushQueueLength",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_99th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/getRequestLatency_num_ops": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/slowAppendCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowAppendCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/blockCacheSize": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheSize",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/putRequestLatency_num_ops": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/slowIncrementCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowIncrementCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/blockCacheEvictedCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheEvictionCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/putRequestLatency_95th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_95th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/compactionQueueSize": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.compactionQueueLength",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/putRequestLatency_median": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_median",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_mean": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_mean",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/slowGetCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowGetCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/blockCacheCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/getRequestLatency_75th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_75th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/readRequestsCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.readRequestCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/putRequestLatency_min": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_min",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/storefileIndexSizeMB": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileIndexSize",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_median": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_median",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/putRequestLatency_max": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_max",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/totalStaticIndexSizeKB": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticIndexSize",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_num_ops": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/putRequestLatency_mean": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_mean",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/requests": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.totalRequestCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/storefiles": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/mutationsWithoutWALCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/writeRequestsCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.writeRequestCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/getRequestLatency_median": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_median",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/slowDeleteCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowDeleteCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/putRequestLatency_99th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_99th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/stores": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/getRequestLatency_min": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_min",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/getRequestLatency_95th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_95th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_95th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_95th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/memstoreSize": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.memStoreSize",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/getRequestLatency_max": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_max",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/getRequestLatency_mean": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_mean",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_75th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_75th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_max": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_max",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/putRequestLatency_75th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_75th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/regions": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.regionCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/totalStaticBloomSizeKB": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticBloomSize",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/blockCacheHitCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheHitCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/getRequestLatency_99th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_99th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          }
-        }
-      }
-    ],
-    "HostComponent": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "metrics/hbase/regionserver/compactionTime_avg_time": {
-            "metric": "hbase.regionserver.compactionTime_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/closeRegion_num_ops": {
-            "metric": "rpc.rpc.closeRegion_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/mutationsWithoutWALSize": {
-            "metric": "regionserver.Server.mutationsWithoutWALSize",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/unassign_num_ops": {
-            "metric": "rpc.rpc.unassign_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/modifyTable_num_ops": {
-            "metric": "rpc.rpc.modifyTable_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/splitRegion/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.splitRegion.aboveOneSec._avg_time",
-            "pointInTime": tru

<TRUNCATED>
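
For orientation, every entry in the ganglia and jmx blocks above follows one schema: an Ambari-facing metric path maps to the name of the metric at the underlying source, plus two capability flags ("temporal" for time-series queries, "pointInTime" for current-value reads). In the diff, ganglia entries are generally temporal and jmx entries point-in-time, and the same Ambari path can appear under both sources. The sketch below is illustrative plain Python, not Ambari code; the pick_source helper and the definitions variable are assumptions, and only the JSON shape is taken from the diff.

    # Illustrative only: the shape of the metric definitions removed above,
    # and one hypothetical way a consumer could choose a source per request.
    # pick_source and load order are assumptions, not Ambari APIs.
    definitions = [
        {
            "type": "ganglia",   # time-series source: temporal entries
            "metrics": {
                "metrics/hbase/regionserver/requests": {
                    "metric": "regionserver.Server.totalRequestCount",
                    "pointInTime": False,
                    "temporal": True,
                },
            },
        },
        {
            "type": "jmx",       # live-bean source: point-in-time entries
            "metrics": {
                "metrics/hbase/regionserver/requests": {
                    "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.totalRequestCount",
                    "pointInTime": True,
                    "temporal": False,
                },
            },
        },
    ]

    def pick_source(metric_path, want_temporal):
        """Return (source type, source metric name) for the first entry
        that supports the requested mode, or None if nothing matches."""
        flag = "temporal" if want_temporal else "pointInTime"
        for source in definitions:
            entry = source["metrics"].get(metric_path)
            if entry and entry[flag]:
                return source["type"], entry["metric"]
        return None

    # A temporal request resolves to ganglia, a point-in-time one to jmx:
    print(pick_source("metrics/hbase/regionserver/requests", True))
    print(pick_source("metrics/hbase/regionserver/requests", False))
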

[14/37] ambari git commit: AMBARI-8745: Common Services: Refactor HDP-2.0.6 FLUME, GANGLIA, HBASE services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index 0a8c874..76bf82e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -22,21 +22,28 @@ from stacks.utils.RMFTestCase import *
 
 @patch("os.path.exists", new = MagicMock(return_value=True))
 class TestHBaseMaster(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "HBASE/0.96.0.2.0/package"
+  STACK_VERSION = "2.0.6"
+
   def test_configure_default(self):
-    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_master.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
                    classname = "HbaseMaster",
                    command = "configure",
-                   config_file="default.json"
+                   config_file="default.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
     self.assert_configure_default()
     self.assertNoMoreResources()
 
   def test_start_default(self):
-    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_master.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
                    classname = "HbaseMaster",
                    command = "start",
-                   config_file="default.json"
+                   config_file="default.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
     self.assert_configure_default()
@@ -47,10 +54,12 @@ class TestHBaseMaster(RMFTestCase):
     self.assertNoMoreResources()
     
   def test_stop_default(self):
-    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_master.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
                    classname = "HbaseMaster",
                    command = "stop",
-                   config_file="default.json"
+                   config_file="default.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
     self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf stop master',
@@ -64,10 +73,12 @@ class TestHBaseMaster(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_decom_default(self):
-    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_master.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
                        classname = "HbaseMaster",
                        command = "decommission",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assertResourceCalled('File', '/usr/lib/hbase/bin/draining_servers.rb',
@@ -93,10 +104,12 @@ class TestHBaseMaster(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_decom_default_draining_only(self):
-    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_master.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
                        classname = "HbaseMaster",
                        command = "decommission",
-                       config_file="default.hbasedecom.json"
+                       config_file="default.hbasedecom.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assertResourceCalled('File', '/usr/lib/hbase/bin/draining_servers.rb',
@@ -110,20 +123,24 @@ class TestHBaseMaster(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_configure_secured(self):
-    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_master.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
                    classname = "HbaseMaster",
                    command = "configure",
-                   config_file="secured.json"
+                   config_file="secured.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
     self.assert_configure_secured()
     self.assertNoMoreResources()
     
   def test_start_secured(self):
-    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_master.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
                    classname = "HbaseMaster",
                    command = "start",
-                   config_file="secured.json"
+                   config_file="secured.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
     self.assert_configure_secured()
@@ -134,10 +151,12 @@ class TestHBaseMaster(RMFTestCase):
     self.assertNoMoreResources()
     
   def test_stop_secured(self):
-    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_master.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
                    classname = "HbaseMaster",
                    command = "stop",
-                   config_file="secured.json"
+                   config_file="secured.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf stop master',
@@ -151,10 +170,12 @@ class TestHBaseMaster(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_decom_secure(self):
-    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_master.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
                        classname = "HbaseMaster",
                        command = "decommission",
-                       config_file="secured.json"
+                       config_file="secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assertResourceCalled('File', '/usr/lib/hbase/bin/draining_servers.rb',

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
index 193a0d7..a38e2b8 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
@@ -22,21 +22,28 @@ from stacks.utils.RMFTestCase import *
 
 @patch("os.path.exists", new = MagicMock(return_value=True))
 class TestHbaseRegionServer(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "HBASE/0.96.0.2.0/package"
+  STACK_VERSION = "2.0.6"
+
   def test_configure_default(self):
-    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_regionserver.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
                    classname = "HbaseRegionServer",
                    command = "configure",
-                   config_file="default.json"
+                   config_file="default.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
     self.assert_configure_default()
     self.assertNoMoreResources()
     
   def test_start_default(self):
-    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_regionserver.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
                    classname = "HbaseRegionServer",
                    command = "start",
-                   config_file="default.json"
+                   config_file="default.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
     self.assert_configure_default()
@@ -47,10 +54,12 @@ class TestHbaseRegionServer(RMFTestCase):
     self.assertNoMoreResources()
     
   def test_stop_default(self):
-    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_regionserver.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
                    classname = "HbaseRegionServer",
                    command = "stop",
-                   config_file="default.json"
+                   config_file="default.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
     self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf stop regionserver',
@@ -64,20 +73,24 @@ class TestHbaseRegionServer(RMFTestCase):
     self.assertNoMoreResources()
     
   def test_configure_secured(self):
-    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_regionserver.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
                    classname = "HbaseRegionServer",
                    command = "configure",
-                   config_file="secured.json"
+                   config_file="secured.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
     self.assert_configure_secured()
     self.assertNoMoreResources()
     
   def test_start_secured(self):
-    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_regionserver.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
                    classname = "HbaseRegionServer",
                    command = "start",
-                   config_file="secured.json"
+                   config_file="secured.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
     self.assert_configure_secured()
@@ -88,10 +101,12 @@ class TestHbaseRegionServer(RMFTestCase):
     self.assertNoMoreResources()
     
   def test_stop_secured(self):
-    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_regionserver.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_regionserver.py",
                    classname = "HbaseRegionServer",
                    command = "stop",
-                   config_file="secured.json"
+                   config_file="secured.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf stop regionserver',

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_service_check.py
index 7dab7fc..1e6102a 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_service_check.py
@@ -24,12 +24,16 @@ import  resource_management.libraries.functions
 
 @patch.object(resource_management.libraries.functions, "get_unique_id_and_date", new = MagicMock(return_value=''))
 class TestServiceCheck(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "HBASE/0.96.0.2.0/package"
+  STACK_VERSION = "2.0.6"
 
   def test_service_check_default(self):
-    self.executeScript("2.0.6/services/HBASE/package/scripts/service_check.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
                         classname="HbaseServiceCheck",
                         command="service_check",
-                        config_file="default.json"
+                        config_file="default.json",
+                        hdp_stack_version = self.STACK_VERSION,
+                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/tmp/hbaseSmokeVerify.sh',
       content = StaticFile('hbaseSmokeVerify.sh'),
@@ -55,10 +59,12 @@ class TestServiceCheck(RMFTestCase):
     
     
   def test_service_check_secured(self):
-    self.executeScript("2.0.6/services/HBASE/package/scripts/service_check.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
                         classname="HbaseServiceCheck",
                         command="service_check",
-                        config_file="secured.json"
+                        config_file="secured.json",
+                        hdp_stack_version = self.STACK_VERSION,
+                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/tmp/hbaseSmokeVerify.sh',
       content = StaticFile('hbaseSmokeVerify.sh'),
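
Taken together, these hunks follow a single pattern: each test class names the
service's common-services package directory and stack version once, then hands
both to executeScript along with target = RMFTestCase.TARGET_COMMON_SERVICES.
A minimal sketch of a fully refactored test method, assembled from the hunks
above (the import line and the RMFTestCase plumbing under
ambari-server/src/test/python/stacks/utils/RMFTestCase.py are assumed rather
than shown in this patch):

  from stacks.utils.RMFTestCase import *

  class TestServiceCheck(RMFTestCase):
    # Scripts now resolve under common-services/<SERVICE>/<version>/package
    # instead of a stack-local path such as "2.0.6/services/HBASE/package".
    COMMON_SERVICES_PACKAGE_DIR = "HBASE/0.96.0.2.0/package"
    STACK_VERSION = "2.0.6"

    def test_service_check_default(self):
      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
                         classname = "HbaseServiceCheck",
                         command = "service_check",
                         config_file = "default.json",
                         hdp_stack_version = self.STACK_VERSION,
                         target = RMFTestCase.TARGET_COMMON_SERVICES)
      self.assertNoMoreResources()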


[13/37] ambari git commit: AMBARI-8695: Common Services: Refactor HDP-2.0.6 HDFS, ZOOKEEPER services (Jayush Luniya)

Posted by jl...@apache.org.
AMBARI-8695: Common Services: Refactor HDP-2.0.6 HDFS, ZOOKEEPER services (Jayush Luniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/17b71553
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/17b71553
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/17b71553

Branch: refs/heads/trunk
Commit: 17b71553d05cfdca9fd047bfd467f621edc284e1
Parents: 75cdb28
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Wed Dec 17 11:57:09 2014 -0800
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Wed Dec 17 11:57:09 2014 -0800

----------------------------------------------------------------------
 ambari-agent/pom.xml                            |    1 +
 ambari-server/conf/unix/ambari.properties       |    2 +-
 ambari-server/conf/windows/ambari.properties    |    2 +-
 ambari-server/pom.xml                           |   12 +-
 .../src/main/assemblies/server-windows.xml      |    1 +
 ambari-server/src/main/assemblies/server.xml    |    1 +
 .../common-services/HDFS/2.1.0.2.0/alerts.json  |  529 ++
 .../HDFS/2.1.0.2.0/configuration/core-site.xml  |  180 +
 .../HDFS/2.1.0.2.0/configuration/hadoop-env.xml |  210 +
 .../2.1.0.2.0/configuration/hadoop-policy.xml   |  134 +
 .../HDFS/2.1.0.2.0/configuration/hdfs-log4j.xml |  201 +
 .../HDFS/2.1.0.2.0/configuration/hdfs-site.xml  |  430 +
 .../common-services/HDFS/2.1.0.2.0/metainfo.xml |  225 +
 .../common-services/HDFS/2.1.0.2.0/metrics.json | 7840 ++++++++++++++++++
 .../package/files/alert_checkpoint_time.py      |  136 +
 .../package/files/alert_ha_namenode_health.py   |  166 +
 .../2.1.0.2.0/package/files/checkForFormat.sh   |   71 +
 .../HDFS/2.1.0.2.0/package/files/checkWebUI.py  |   53 +
 .../HDFS/2.1.0.2.0/package/scripts/__init__.py  |   20 +
 .../scripts/balancer-emulator/balancer-err.log  | 1032 +++
 .../scripts/balancer-emulator/balancer.log      |   29 +
 .../scripts/balancer-emulator/hdfs-command.py   |   45 +
 .../HDFS/2.1.0.2.0/package/scripts/datanode.py  |   88 +
 .../package/scripts/datanode_upgrade.py         |  114 +
 .../HDFS/2.1.0.2.0/package/scripts/hdfs.py      |   83 +
 .../2.1.0.2.0/package/scripts/hdfs_client.py    |   53 +
 .../2.1.0.2.0/package/scripts/hdfs_datanode.py  |   62 +
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  |  202 +
 .../2.1.0.2.0/package/scripts/hdfs_rebalance.py |  130 +
 .../2.1.0.2.0/package/scripts/hdfs_snamenode.py |   51 +
 .../2.1.0.2.0/package/scripts/journalnode.py    |   90 +
 .../HDFS/2.1.0.2.0/package/scripts/namenode.py  |  162 +
 .../HDFS/2.1.0.2.0/package/scripts/params.py    |  289 +
 .../2.1.0.2.0/package/scripts/service_check.py  |  119 +
 .../HDFS/2.1.0.2.0/package/scripts/snamenode.py |   68 +
 .../2.1.0.2.0/package/scripts/status_params.py  |   31 +
 .../HDFS/2.1.0.2.0/package/scripts/utils.py     |  230 +
 .../2.1.0.2.0/package/scripts/zkfc_slave.py     |   71 +
 .../package/templates/exclude_hosts_list.j2     |   21 +
 .../2.1.0.2.0/package/templates/hdfs.conf.j2    |   35 +
 .../HDFS/2.1.0.2.0/package/templates/slaves.j2  |   21 +
 .../ZOOKEEPER/3.4.5.2.0/alerts.json             |   58 +
 .../3.4.5.2.0/configuration/zoo.cfg.xml         |   62 +
 .../3.4.5.2.0/configuration/zookeeper-env.xml   |   61 +
 .../3.4.5.2.0/configuration/zookeeper-log4j.xml |  101 +
 .../ZOOKEEPER/3.4.5.2.0/metainfo.xml            |   89 +
 .../ZOOKEEPER/3.4.5.2.0/package/files/zkEnv.sh  |   96 +
 .../3.4.5.2.0/package/files/zkServer.sh         |  120 +
 .../3.4.5.2.0/package/files/zkService.sh        |   26 +
 .../3.4.5.2.0/package/files/zkSmoke.sh          |   78 +
 .../3.4.5.2.0/package/scripts/__init__.py       |   20 +
 .../3.4.5.2.0/package/scripts/params.py         |   88 +
 .../3.4.5.2.0/package/scripts/service_check.py  |   46 +
 .../3.4.5.2.0/package/scripts/status_params.py  |   26 +
 .../3.4.5.2.0/package/scripts/zookeeper.py      |  111 +
 .../package/scripts/zookeeper_client.py         |   42 +
 .../package/scripts/zookeeper_server.py         |  100 +
 .../package/scripts/zookeeper_service.py        |   50 +
 .../package/templates/configuration.xsl.j2      |   42 +
 .../3.4.5.2.0/package/templates/zoo.cfg.j2      |   53 +
 .../templates/zookeeper_client_jaas.conf.j2     |   23 +
 .../package/templates/zookeeper_jaas.conf.j2    |   26 +
 .../stacks/HDP/2.0.6/services/HDFS/alerts.json  |  529 --
 .../services/HDFS/configuration/core-site.xml   |  180 -
 .../services/HDFS/configuration/hadoop-env.xml  |  210 -
 .../HDFS/configuration/hadoop-policy.xml        |  134 -
 .../services/HDFS/configuration/hdfs-log4j.xml  |  201 -
 .../services/HDFS/configuration/hdfs-site.xml   |  430 -
 .../stacks/HDP/2.0.6/services/HDFS/metainfo.xml |  201 +-
 .../stacks/HDP/2.0.6/services/HDFS/metrics.json | 7840 ------------------
 .../HDFS/package/files/alert_checkpoint_time.py |  136 -
 .../package/files/alert_ha_namenode_health.py   |  166 -
 .../HDFS/package/files/checkForFormat.sh        |   71 -
 .../services/HDFS/package/files/checkWebUI.py   |   53 -
 .../services/HDFS/package/scripts/__init__.py   |   20 -
 .../scripts/balancer-emulator/balancer-err.log  | 1032 ---
 .../scripts/balancer-emulator/balancer.log      |   29 -
 .../scripts/balancer-emulator/hdfs-command.py   |   45 -
 .../services/HDFS/package/scripts/datanode.py   |   88 -
 .../HDFS/package/scripts/datanode_upgrade.py    |  114 -
 .../2.0.6/services/HDFS/package/scripts/hdfs.py |   83 -
 .../HDFS/package/scripts/hdfs_client.py         |   53 -
 .../HDFS/package/scripts/hdfs_datanode.py       |   62 -
 .../HDFS/package/scripts/hdfs_namenode.py       |  202 -
 .../HDFS/package/scripts/hdfs_rebalance.py      |  130 -
 .../HDFS/package/scripts/hdfs_snamenode.py      |   51 -
 .../HDFS/package/scripts/journalnode.py         |   90 -
 .../services/HDFS/package/scripts/namenode.py   |  162 -
 .../services/HDFS/package/scripts/params.py     |  289 -
 .../HDFS/package/scripts/service_check.py       |  119 -
 .../services/HDFS/package/scripts/snamenode.py  |   68 -
 .../HDFS/package/scripts/status_params.py       |   31 -
 .../services/HDFS/package/scripts/utils.py      |  230 -
 .../services/HDFS/package/scripts/zkfc_slave.py |   71 -
 .../package/templates/exclude_hosts_list.j2     |   21 -
 .../HDFS/package/templates/hdfs.conf.j2         |   35 -
 .../services/HDFS/package/templates/slaves.j2   |   21 -
 .../HDP/2.0.6/services/ZOOKEEPER/alerts.json    |   58 -
 .../ZOOKEEPER/configuration/zoo.cfg.xml         |   62 -
 .../ZOOKEEPER/configuration/zookeeper-env.xml   |   61 -
 .../ZOOKEEPER/configuration/zookeeper-log4j.xml |  101 -
 .../HDP/2.0.6/services/ZOOKEEPER/metainfo.xml   |   65 +-
 .../services/ZOOKEEPER/package/files/zkEnv.sh   |   96 -
 .../ZOOKEEPER/package/files/zkServer.sh         |  120 -
 .../ZOOKEEPER/package/files/zkService.sh        |   26 -
 .../services/ZOOKEEPER/package/files/zkSmoke.sh |   78 -
 .../ZOOKEEPER/package/scripts/__init__.py       |   20 -
 .../ZOOKEEPER/package/scripts/params.py         |   88 -
 .../ZOOKEEPER/package/scripts/service_check.py  |   46 -
 .../ZOOKEEPER/package/scripts/status_params.py  |   26 -
 .../ZOOKEEPER/package/scripts/zookeeper.py      |  111 -
 .../package/scripts/zookeeper_client.py         |   42 -
 .../package/scripts/zookeeper_server.py         |  100 -
 .../package/scripts/zookeeper_service.py        |   50 -
 .../package/templates/configuration.xsl.j2      |   42 -
 .../ZOOKEEPER/package/templates/zoo.cfg.j2      |   53 -
 .../templates/zookeeper_client_jaas.conf.j2     |   23 -
 .../package/templates/zookeeper_jaas.conf.j2    |   26 -
 .../services/KerberosServiceMetaInfoTest.java   |   12 +-
 .../python/stacks/2.0.6/HDFS/test_datanode.py   |   89 +-
 .../stacks/2.0.6/HDFS/test_hdfs_client.py       |    8 +-
 .../stacks/2.0.6/HDFS/test_journalnode.py       |   38 +-
 .../python/stacks/2.0.6/HDFS/test_namenode.py   |   92 +-
 .../stacks/2.0.6/HDFS/test_service_check.py     |   31 +-
 .../python/stacks/2.0.6/HDFS/test_snamenode.py  |   38 +-
 .../test/python/stacks/2.0.6/HDFS/test_zkfc.py  |   26 +-
 .../2.0.6/ZOOKEEPER/test_zookeeper_client.py    |   15 +-
 .../2.0.6/ZOOKEEPER/test_zookeeper_server.py    |   62 +-
 .../ZOOKEEPER/test_zookeeper_service_check.py   |   24 +-
 .../src/test/python/stacks/utils/RMFTestCase.py |   15 +-
 130 files changed, 14856 insertions(+), 14643 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-agent/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-agent/pom.xml b/ambari-agent/pom.xml
index 034fdb9..939c366 100644
--- a/ambari-agent/pom.xml
+++ b/ambari-agent/pom.xml
@@ -613,6 +613,7 @@
                 <resource>
                   <directory>${ambari.server.module}/src/main/resources</directory>
                   <includes>
+                    <include>common-services/**</include>
                     <include>stacks/stack_advisor.py</include>
                     <include>stacks/${stack.distribution}/**/*</include>
                     <include>custom_actions/**/*</include>

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/conf/unix/ambari.properties
----------------------------------------------------------------------
diff --git a/ambari-server/conf/unix/ambari.properties b/ambari-server/conf/unix/ambari.properties
index fd1818b..e29a6e2 100644
--- a/ambari-server/conf/unix/ambari.properties
+++ b/ambari-server/conf/unix/ambari.properties
@@ -27,7 +27,7 @@ jce_policy1.6.url=http://public-repo-1.hortonworks.com/ARTIFACTS/jce_policy-6.zi
 jdk1.7.url=http://public-repo-1.hortonworks.com/ARTIFACTS/jdk-7u67-linux-x64.tar.gz
 jce_policy1.7.url=http://public-repo-1.hortonworks.com/ARTIFACTS/UnlimitedJCEPolicyJDK7.zip
 metadata.path=/var/lib/ambari-server/resources/stacks
-common.services.path=
+common.services.path=/var/lib/ambari-server/resources/common-services
 server.version.file=/var/lib/ambari-server/resources/version
 webapp.dir=/usr/lib/ambari-server/web
 bootstrap.dir=/var/run/ambari-server/bootstrap

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/conf/windows/ambari.properties
----------------------------------------------------------------------
diff --git a/ambari-server/conf/windows/ambari.properties b/ambari-server/conf/windows/ambari.properties
index 8939b73..3982bb9 100644
--- a/ambari-server/conf/windows/ambari.properties
+++ b/ambari-server/conf/windows/ambari.properties
@@ -32,7 +32,7 @@ jdk1.7.67.jcpol-file=UnlimitedJCEPolicyJDK7.zip
 jdk1.7.67.home=C:\\jdk1.7.0_67
 
 metadata.path=resources\\stacks
-common.services.path=
+common.services.path=resources\\common-services
 server.version.file=version
 webapp.dir=web
 bootstrap.dir=bootstrap
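
On both platforms common.services.path now points at the extracted service
definitions, and the stack-side metainfo.xml files (see the diffstat above)
shrink to references into that shared tree. As a rough sketch of the
resolution this enables (illustrative Python, not Ambari's actual server-side
Java; the helper name and the exact inheritance mechanics are assumptions):

  import os

  COMMON_SERVICES_PATH = "/var/lib/ambari-server/resources/common-services"

  def resolve_common_service(reference):
      # A stack service that inherits a shared definition carries a pointer
      # such as "common-services/HDFS/2.1.0.2.0"; resolve it against the
      # common.services.path configured in ambari.properties.
      prefix = "common-services/"
      if not reference.startswith(prefix):
          raise ValueError("not a common-services reference: " + reference)
      return os.path.join(COMMON_SERVICES_PATH, reference[len(prefix):])

  print(resolve_common_service("common-services/HDFS/2.1.0.2.0"))
  # -> /var/lib/ambari-server/resources/common-services/HDFS/2.1.0.2.0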

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index f1170c9..78012ba 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -166,8 +166,8 @@
             <exclude>src/main/resources/stacks/HDP/2.1.GlusterFS/services/YARN/package/templates/exclude_hosts_list.j2</exclude>
             <exclude>src/main/windows/ambari-server.cmd</exclude>
             <exclude>src/main/windows/ambari-server.ps1</exclude>
-            <exclude>src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer-err.log</exclude>
-            <exclude>src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/balancer-emulator/balancer.log</exclude>
+            <exclude>src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/balancer-emulator/balancer-err.log</exclude>
+            <exclude>src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/balancer-emulator/balancer.log</exclude>
             <exclude>src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/balancer-emulator/balancer.log</exclude>
             <exclude>src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/balancer-emulator/balancer-err.log</exclude>
             <exclude>src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/balancer.log</exclude>
@@ -536,6 +536,14 @@
               </sources>
             </mapping>
             <mapping>
+              <directory>/var/lib/ambari-server/resources/common-services</directory>
+              <sources>
+                <source>
+                  <location>target/classes/common-services</location>
+                </source>
+              </sources>
+            </mapping>
+            <mapping>
               <directory>/var/lib/ambari-server/resources/stacks/${stack.distribution}</directory>
               <sources>
                 <source>

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/assemblies/server-windows.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/assemblies/server-windows.xml b/ambari-server/src/main/assemblies/server-windows.xml
index 87c3c2b..a3edfb7 100644
--- a/ambari-server/src/main/assemblies/server-windows.xml
+++ b/ambari-server/src/main/assemblies/server-windows.xml
@@ -169,6 +169,7 @@
         <include>host_scripts/**</include>
         <include>stacks/stack_advisor.py</include>
         <include>scripts/**</include>
+        <include>common-services/**</include>
         <include>stacks/HDPWIN/**</include>
         <include>upgrade/**</include>
       </includes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/assemblies/server.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/assemblies/server.xml b/ambari-server/src/main/assemblies/server.xml
index a5a1f79..58d1a73 100644
--- a/ambari-server/src/main/assemblies/server.xml
+++ b/ambari-server/src/main/assemblies/server.xml
@@ -93,6 +93,7 @@
       <directory>src/main/resources</directory>
       <outputDirectory>/ambari-server-${project.version}/var/lib/ambari-server/resources/</outputDirectory>
       <includes>
+        <include>common-services/**</include>
         <include>stacks/stack_advisor.py</include>
         <include>stacks/${stack.distribution}/**</include>
       </includes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/alerts.json b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/alerts.json
new file mode 100644
index 0000000..c731d75
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/alerts.json
@@ -0,0 +1,529 @@
+{
+  "HDFS":{
+    "service": [
+      {
+        "name": "datanode_process_percent",
+        "label": "Percent DataNodes Available",
+        "description": "This alert is triggered if the number of down DataNodes in the cluster is greater than the configured critical threshold. It aggregates the results of DataNode process checks.",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "AGGREGATE",
+          "alert_name": "datanode_process",
+          "reporting": {
+            "ok": {
+              "text": "affected: [{1}], total: [{0}]"
+            },
+            "warning": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.1
+            },
+            "critical": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.3
+            }
+          }
+        }
+      },
+      {
+        "name": "datanode_storage_percent",
+        "label": "Percent DataNodes With Available Space",
+        "description": "This service-level alert is triggered if the storage on a certain percentage of DataNodes exceeds either the warning or critical threshold values.",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "AGGREGATE",
+          "alert_name": "datanode_storage",
+          "reporting": {
+            "ok": {
+              "text": "affected: [{1}], total: [{0}]"
+            },
+            "warning": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.1
+            },
+            "critical": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.3
+            }
+          }
+        }
+      },
+      {
+        "name": "journalnode_process_percent",
+        "label": "Percent JournalNodes Available",
+        "description": "This alert is triggered if the number of down JournalNodes in the cluster is greater than the configured critical threshold. It aggregates the results of JournalNode process checks.",
+        "interval": 1,
+        "scope": "SERVICE",
+        "enabled": true,
+        "source": {
+          "type": "AGGREGATE",
+          "alert_name": "journalnode_process",
+          "reporting": {
+            "ok": {
+              "text": "affected: [{1}], total: [{0}]"
+            },
+            "warning": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.33
+            },
+            "critical": {
+              "text": "affected: [{1}], total: [{0}]",
+              "value": 0.50
+            }
+          }
+        }
+      }
+    ],
+    "NAMENODE": [
+      {
+        "name": "namenode_webui",
+        "label": "NameNode Web UI",
+        "description": "This host-level alert is triggered if the NameNode Web UI is unreachable.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{hdfs-site/dfs.namenode.http-address}}",
+            "https": "{{hdfs-site/dfs.namenode.https-address}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY"
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.4f} seconds"
+            },
+            "warning":{
+              "text": "HTTP {0} response in {2:.4f} seconds"
+            },
+            "critical": {
+              "text": "Connection failed to {1}"
+            }
+          }
+        }
+      },
+      {
+        "name": "namenode_cpu",
+        "label": "NameNode Host CPU Utilization",
+        "description": "This host-level alert is triggered if CPU utilization of the NameNode exceeds certain warning and critical thresholds. It checks the NameNode JMX Servlet for the SystemCPULoad property. The threshold values are in percent.",
+        "interval": 5,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hdfs-site/dfs.namenode.http-address}}",
+            "https": "{{hdfs-site/dfs.namenode.https-address}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY"
+          },
+          "reporting": {
+            "ok": {
+              "text": "{1} CPU, load {0:.1%}"
+            },
+            "warning": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 200
+            },
+            "critical": {
+              "text": "{1} CPU, load {0:.1%}",
+              "value": 250
+            },
+            "units" : "%"
+          },
+          "jmx": {
+            "property_list": [
+              "java.lang:type=OperatingSystem/SystemCpuLoad",
+              "java.lang:type=OperatingSystem/AvailableProcessors"
+            ],
+            "value": "{0} * 100"
+          }
+        }
+      },
+      {
+        "name": "namenode_hdfs_blocks_health",
+        "label": "NameNode Blocks Health",
+        "description": "This service-level alert is triggered if the number of corrupt or missing blocks exceeds the configured critical threshold. The threshold values are in blocks.",
+        "interval": 2,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hdfs-site/dfs.namenode.http-address}}",
+            "https": "{{hdfs-site/dfs.namenode.https-address}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY"
+          },
+          "reporting": {
+            "ok": {
+              "text": "Total Blocks:[{1}], Missing Blocks:[{0}]"
+            },
+            "warning": {
+              "text": "Total Blocks:[{1}], Missing Blocks:[{0}]",
+              "value": 1
+            },          
+            "critical": {
+              "text": "Total Blocks:[{1}], Missing Blocks:[{0}]",
+              "value": 1
+            },
+            "units" : "Blocks"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=NameNode,name=FSNamesystem/MissingBlocks",
+              "Hadoop:service=NameNode,name=FSNamesystem/BlocksTotal"
+            ],
+            "value": "{0}"
+          }
+        }
+      },
+      {
+        "name": "namenode_hdfs_capacity_utilization",
+        "label": "HDFS Capacity Utilization",
+        "description": "This service-level alert is triggered if the HDFS capacity utilization exceeds the configured warning and critical thresholds. It checks the NameNode JMX Servlet for the CapacityUsed and CapacityRemaining properties. The threshold values are in percent.",
+        "interval": 2,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hdfs-site/dfs.namenode.http-address}}",
+            "https": "{{hdfs-site/dfs.namenode.https-address}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY"
+          },
+          "reporting": {
+            "ok": {
+              "text": "Capacity Used:[{2:d}%, {0}], Capacity Remaining:[{1}]"
+            },
+            "warning": {
+              "text": "Capacity Used:[{2:d}%, {0}], Capacity Remaining:[{1}]",
+              "value": 80
+            },          
+            "critical": {
+              "text": "Capacity Used:[{2:d}%, {0}], Capacity Remaining:[{1}]",
+              "value": 90
+            },
+            "units" : "%"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=NameNode,name=FSNamesystemState/CapacityUsed",
+              "Hadoop:service=NameNode,name=FSNamesystemState/CapacityRemaining"
+            ],
+            "value": "{0}/({0} + {1}) * 100"
+          }
+        }
+      },
+      {
+        "name": "namenode_rpc_latency",
+        "label": "NameNode RPC Latency",
+        "description": "This host-level alert is triggered if the NameNode RPC latency exceeds the configured critical threshold. Typically an increase in the RPC processing time increases the RPC queue length, causing the average queue wait time to increase for NameNode operations. The threshold values are in milliseconds.",
+        "interval": 2,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hdfs-site/dfs.namenode.http-address}}",
+            "https": "{{hdfs-site/dfs.namenode.https-address}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY"
+          },
+          "reporting": {
+            "ok": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]"
+            },
+            "warning": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
+              "value": 3000
+            },          
+            "critical": {
+              "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]",
+              "value": 5000
+            },
+            "units" : "ms"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=NameNode,name=RpcActivityForPort*/RpcQueueTimeAvgTime",
+              "Hadoop:service=NameNode,name=RpcActivityForPort*/RpcProcessingTimeAvgTime"
+            ],
+            "value": "{0}"
+          }
+        }
+      },
+      {
+        "name": "namenode_directory_status",
+        "label": "NameNode Directory Status",
+        "description": "This host-level alert is triggered if any of the the NameNode's NameDirStatuses metric reports a failed directory. The threshold values are in the number of directories that are not healthy",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hdfs-site/dfs.namenode.http-address}}",
+            "https": "{{hdfs-site/dfs.namenode.https-address}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY"
+          },
+          "reporting": {
+            "ok": {
+              "text": "Directories are healthy"
+            },
+            "warning": {
+              "text": "Failed directory count: {1}",
+              "value": 1
+            },          
+            "critical": {
+              "text": "Failed directory count: {1}",
+              "value": 1
+            },
+            "units" : "Dirs"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=NameNode,name=NameNodeInfo/NameDirStatuses"
+            ],
+            "value": "calculate(args)\ndef calculate(args):\n  import json\n  json_statuses = json.loads({0})\n  return len(json_statuses['failed']) if 'failed' in json_statuses else 0"
+          }
+        }
+      },
+      {
+        "name": "namenode_process",
+        "label": "NameNode Process",
+        "description": "This host-level alert is triggered if the NameNode process cannot be confirmed to be up and listening on the network.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "{{hdfs-site/dfs.namenode.http-address}}",
+          "default_port": 50070,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      },
+      {
+        "name": "namenode_last_checkpoint",
+        "label": "NameNode Last Checkpoint",
+        "description": "This service-level alert will trigger if the last time that the NameNode performed a checkpoint was too long ago. It will also trigger if the number of uncommitted transactions is beyond a certain threshold.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HDP/2.0.6/services/HDFS/package/files/alert_checkpoint_time.py"
+        }
+      },
+      {
+        "name": "namenode_ha_health",
+        "label": "NameNode High Availability Health",
+        "description": "This service-level alert is triggered if either the Active NameNode or Standby NameNode are not running.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "ignore_host": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HDP/2.0.6/services/HDFS/package/files/alert_ha_namenode_health.py"
+        }
+      }
+    ],
+    "SECONDARY_NAMENODE": [
+      {
+        "name": "secondary_namenode_process",
+        "label": "Secondary NameNode Process",
+        "description": "This host-level alert is triggered if the Secondary NameNode process cannot be confirmed to be up and listening on the network.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "PORT",
+          "uri": "{{hdfs-site/dfs.namenode.secondary.http-address}}",
+          "default_port": 50071,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ],
+    "JOURNALNODE": [
+      {
+        "name": "journalnode_process",
+        "label": "JournalNode Process",
+        "description": "This host-level alert is triggered if the JournalNode process cannot be confirmed to be up and listening on the network.",
+        "interval": 1,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "PORT",        
+          "uri": "{{hdfs-site/dfs.journalnode.http-address}}",
+          "default_port": 8480,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ],      
+    "DATANODE": [
+      {
+        "name": "datanode_process",
+        "label": "DataNode Process",
+        "description": "This host-level alert is triggered if the individual DataNode processes cannot be established to be up and listening on the network.",
+        "interval": 1,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "PORT",        
+          "uri": "{{hdfs-site/dfs.datanode.address}}",
+          "default_port": 50010,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      },
+      {
+        "name": "datanode_webui",
+        "label": "DataNode Web UI",
+        "description": "This host-level alert is triggered if the DataNode Web UI is unreachable.",
+        "interval": 1,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "WEB",
+          "uri": {
+            "http": "{{hdfs-site/dfs.datanode.http.address}}",
+            "https": "{{hdfs-site/dfs.datanode.https.address}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY"
+          },
+          "reporting": {
+            "ok": {
+              "text": "HTTP {0} response in {2:.4f} seconds"
+            },
+            "warning":{
+              "text": "HTTP {0} response in {2:.4f} seconds"
+            },
+            "critical": {
+              "text": "Connection failed to {1}"
+            }
+          }
+        }
+      },    
+      {
+        "name": "datanode_storage",
+        "label": "DataNode Storage",
+        "description": "This host-level alert is triggered if storage capacity if full on the DataNode. It checks the DataNode JMX Servlet for the Capacity and Remaining properties. The threshold values are in percent.",
+        "interval": 2,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "METRIC",
+          "uri": {
+            "http": "{{hdfs-site/dfs.datanode.http.address}}",
+            "https": "{{hdfs-site/dfs.datanode.https.address}}",
+            "https_property": "{{hdfs-site/dfs.http.policy}}",
+            "https_property_value": "HTTPS_ONLY"
+          },
+          "reporting": {
+            "ok": {
+              "text": "Remaining Capacity:[{0}], Total Capacity:[{2:d}% Used, {1}]"
+            },
+            "warning": {
+              "text": "Remaining Capacity:[{0}], Total Capacity:[{2:d}% Used, {1}]",
+              "value": 80
+            },
+            "critical": {
+              "text": "Remaining Capacity:[{0}], Total Capacity:[{2:d}% Used, {1}]",
+              "value": 90
+            },
+            "units" : "%"
+          },
+          "jmx": {
+            "property_list": [
+              "Hadoop:service=DataNode,name=FSDatasetState-*/Remaining",
+              "Hadoop:service=DataNode,name=FSDatasetState-*/Capacity"
+            ],
+            "value": "({1} - {0})/{1} * 100"
+          }
+        }
+      }    
+    ],
+    "ZKFC": [
+      {
+        "name": "hdfs_zookeeper_failover_controller_process",
+        "label": "ZooKeeper Failover Controller Process",
+        "description": "This host-level alert is triggered if the ZooKeeper Failover Controller process cannot be confirmed to be up and listening on the network.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "PORT",        
+          "uri": "{{core-site/ha.zookeeper.quorum}}",
+          "default_port": 2181,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file
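
Two conventions in this file deserve a note. For AGGREGATE sources, the
warning and critical values are fractions of affected hosts (0.1 and 0.3 for
the DataNode checks above), and the reporting text is formatted with {0} as
the total count and {1} as the affected count. A small sketch of that
evaluation (illustrative only; Ambari's real evaluator is server-side):

  def aggregate_state(affected, total, warning=0.1, critical=0.3):
      # Threshold fractions mirror datanode_process_percent above.
      ratio = float(affected) / total if total else 0.0
      if ratio >= critical:
          state = "CRITICAL"
      elif ratio >= warning:
          state = "WARNING"
      else:
          state = "OK"
      # Reporting text format: "affected: [{1}], total: [{0}]"
      return state, "affected: [%d], total: [%d]" % (affected, total)

  print(aggregate_state(2, 10))  # ('WARNING', 'affected: [2], total: [10]')

For METRIC sources, the jmx value expression combines the listed properties by
position; "{0}/({0} + {1}) * 100" above turns CapacityUsed and
CapacityRemaining into a used-capacity percentage.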

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/core-site.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/core-site.xml
new file mode 100644
index 0000000..6c72e9d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/core-site.xml
@@ -0,0 +1,180 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+ <!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+ 
+        http://www.apache.org/licenses/LICENSE-2.0
+ 
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+ -->
+ 
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+<!-- i/o properties -->
+
+  <property>
+    <name>io.file.buffer.size</name>
+    <value>131072</value>
+    <description>The size of buffer for use in sequence files.
+  The size of this buffer should probably be a multiple of hardware
+  page size (4096 on Intel x86), and it determines how much data is
+  buffered during read and write operations.</description>
+  </property>
+
+  <property>
+    <name>io.serializations</name>
+    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
+    <description>A comma-delimited list of serialization classes that can be used for obtaining serializers and deserializers.
+    </description>
+  </property>
+
+  <property>
+    <name>io.compression.codecs</name>
+    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
+    <description>A list of the compression codec classes that can be used
+                 for compression/decompression.</description>
+  </property>
+
+<!-- file system properties -->
+
+  <property>
+    <name>fs.defaultFS</name>
+    <!-- cluster variant -->
+    <value>hdfs://localhost:8020</value>
+    <description>The name of the default file system.  Either the
+  literal string "local" or a host:port for NDFS.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>fs.trash.interval</name>
+    <value>360</value>
+    <description>Number of minutes between trash checkpoints.
+  If zero, the trash feature is disabled.
+  </description>
+  </property>
+
+  <!-- ipc properties: copied from kryptonite configuration -->
+  <property>
+    <name>ipc.client.idlethreshold</name>
+    <value>8000</value>
+    <description>Defines the threshold number of connections after which
+               connections will be inspected for idleness.
+  </description>
+  </property>
+
+  <property>
+    <name>ipc.client.connection.maxidletime</name>
+    <value>30000</value>
+    <description>The maximum time after which a client will bring down the
+               connection to the server.
+  </description>
+  </property>
+
+  <property>
+    <name>ipc.client.connect.max.retries</name>
+    <value>50</value>
+    <description>Defines the maximum number of retries for IPC connections.</description>
+  </property>
+
+  <property>
+    <name>ipc.server.tcpnodelay</name>
+    <value>true</value>
+    <description>Turn on/off Nagle's algorithm for the TCP socket
+      connection on
+      the server. Setting to true disables the algorithm and may
+      decrease latency
+      with a cost of more/smaller packets.
+    </description>
+  </property>
+
+  <!-- Web Interface Configuration -->
+  <property>
+    <name>mapreduce.jobtracker.webinterface.trusted</name>
+    <value>false</value>
+    <description> If set to true, the web interfaces of JT and NN may contain
+                actions, such as kill job, delete file, etc., that should
+                not be exposed to the public. Enable this option if the interfaces
+                are only reachable by those who have the right authorization.
+  </description>
+  </property>
+
+ <property>
+   <name>hadoop.security.authentication</name>
+   <value>simple</value>
+   <description>
+   Set the authentication for the cluster. Valid values are: simple or
+   kerberos.
+   </description>
+ </property>
+<property>
+  <name>hadoop.security.authorization</name>
+  <value>false</value>
+  <description>
+     Enable authorization for different protocols.
+  </description>
+</property>
+
+  <property>
+    <name>hadoop.security.auth_to_local</name>
+    <value>
+        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/
+        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/
+        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/
+        RULE:[2:$1@$0](hm@.*)s/.*/hbase/
+        RULE:[2:$1@$0](rs@.*)s/.*/hbase/
+        DEFAULT
+    </value>
+<description>The mapping from Kerberos principal names to local OS user names (mapreduce.job.user.names).
+  The default rule is just "DEFAULT", which maps all principals in your default domain to their first component.
+  For example, "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" both map to "omalley" if your default domain is APACHE.ORG.
+The translation rules have 3 sections:
+      base     filter    substitution
+The base consists of a number, which is the number of components in the principal name (excluding the realm), and a pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component, and $2 to mean the second component.
+
+[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
+[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
+[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
+
+The filter is a regex in parens that must match the generated string for the rule to apply.
+
+"(.*%admin)" will take any string that ends in "%admin"
+"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
+
+Finally, the substitution is a sed rule to translate a regex into a fixed string.
+
+"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
+"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
+"s/X/Y/g" replaces all of the "X" in the name with "Y"
+
+So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
+
+RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
+DEFAULT
+
+To also translate the names with a second component, you'd make the rules:
+
+RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
+RULE:[2:$1@$0](.*@ACME.COM)s/@.*//
+DEFAULT
+
+If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
+
+RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
+DEFAULT
+    </description>
+  </property>
+
+</configuration>
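
As a worked example of the auth_to_local rules above,
RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/ maps a two-component principal such as
nn/host1@EXAMPLE.COM to the local user hdfs. A simplified sketch of the
matching logic (illustrative only; the authoritative implementation is
Hadoop's KerberosName class, and the helper below is hypothetical):

  import re

  def apply_rule(principal, num_components, fmt, flt, sed_from, sed_to):
      name, _, realm = principal.partition("@")
      parts = name.split("/")
      if len(parts) != num_components:
          return None
      base = fmt.replace("$0", realm)      # $0 is the realm
      for i, part in enumerate(parts, 1):  # $1, $2, ... are the components
          base = base.replace("$%d" % i, part)
      if not re.match(flt + "$", base):    # the parenthesized filter must match
          return None
      return re.sub(sed_from, sed_to, base, count=1)  # then the sed substitution runs

  # RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/
  print(apply_rule("nn/host1@EXAMPLE.COM", 2, "$1@$0", "[nd]n@.*", ".*", "hdfs"))
  # -> hdfs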

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
new file mode 100644
index 0000000..1d6618d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
@@ -0,0 +1,210 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_root_logger</name>
+    <value>INFO,RFA</value>
+    <description>Hadoop Root Logger</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>NameNode new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>200</value>
+    <description>NameNode maximum new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+  </property>
+  <property>
+    <name>proxyuser_group</name>
+    <value>users</value>
+    <property-type>GROUP</property-type>
+    <description>Proxy user group.</description>
+  </property>
+  <property>
+    <name>hdfs_user</name>
+    <value>hdfs</value>
+    <property-type>USER</property-type>
+    <description>User to run HDFS as</description>
+  </property>
+  <property>
+    <name>dfs.datanode.data.dir.mount.file</name>
+    <value>/etc/hadoop/conf/dfs_data_dir_mount.hist</value>
+    <description>File path that contains the last known mount point for each data dir. This file is used to avoid creating a DFS data dir on the root drive (and filling it up) if a path was previously mounted on a drive.</description>
+  </property>
+
+  <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for hadoop-env.sh file</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+if [ -d "/usr/lib/tez" ]; then
+  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+fi
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
+    </value>
+  </property>
+  
+</configuration>
\ No newline at end of file
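
The content property above is not a literal script: the agent renders it as a
Jinja2 template, filling the {{...}} placeholders from the service's params
(the {# ... #} marker is a Jinja comment and is stripped at render time). A
standalone sketch of that substitution with a few representative lines and
illustrative values (the real rendering happens inside the agent's
resource_management library):

  from jinja2 import Template

  hadoop_env = Template(
      'export JAVA_HOME={{java_home}}\n'
      'export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"\n'
      'export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n')

  print(hadoop_env.render(java_home='/usr/jdk64/jdk1.7.0_67',
                          hadoop_heapsize='1024',
                          hdfs_log_dir_prefix='/var/log/hadoop'))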

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-policy.xml
new file mode 100644
index 0000000..41bde16
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-policy.xml
@@ -0,0 +1,134 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true">
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientProtocol, which is used by user code
+    via the DistributedFileSystem.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.client.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+    for block recovery.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for DatanodeProtocol, which is used by datanodes to
+    communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.inter.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
+    for updating generation timestamp.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.namenode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for NamenodeProtocol, the protocol used by the secondary
+    namenode to communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.inter.tracker.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
+    communicate with the jobtracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for JobSubmissionProtocol, used by job clients to
+    communicate with the jobtracker for job submission, querying job status, etc.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.task.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
+    tasks to communicate with the parent tasktracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.admin.operations.protocol.acl</name>
+    <value>hadoop</value>
+    <description>ACL for AdminOperationsProtocol. Used for admin commands.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
+    <value>hadoop</value>
+    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
+    user-to-groups mappings. The ACL is a comma-separated list of user and
+    group names. The user and group list is separated by a blank. For
+    example, "alice,bob users,wheel".  A special value of "*" means all
+    users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.refresh.policy.protocol.acl</name>
+    <value>hadoop</value>
+    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
+    dfsadmin and mradmin commands to refresh the security policy in-effect.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+</configuration>
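
The ACL format repeated in the descriptions above (a comma-separated user list and a comma-separated group list, separated by a blank, with "*" meaning everyone) can be illustrated with a small hypothetical helper; this is a sketch of the format, not the actual Hadoop AccessControlList API:

def acl_allows(acl, user, user_groups):
    # "*" grants access to every caller.
    if acl.strip() == "*":
        return True
    # First token: users; optional second token: groups.
    parts = acl.split(" ", 1)
    users = [u for u in parts[0].split(",") if u]
    groups = [g for g in parts[1].split(",") if g] if len(parts) > 1 else []
    return user in users or any(g in groups for g in user_groups)

assert acl_allows("alice,bob users,wheel", "alice", [])
assert acl_allows("alice,bob users,wheel", "carol", ["wheel"])
assert not acl_allows("hadoop", "carol", ["users"])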

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-log4j.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-log4j.xml
new file mode 100644
index 0000000..08822eb
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-log4j.xml
@@ -0,0 +1,201 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+# Define some default values that can be overridden by system properties
+# To change daemon root logger use hadoop_root_logger in hadoop-env
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+#Security audit appender
+#
+hadoop.security.logger=INFO,console
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# Rolling File Appender
+#
+
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and number of backups
+log4j.appender.RFA.MaxFileSize=256MB
+log4j.appender.RFA.MaxBackupIndex=10
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+# Debugging Pattern format
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+# Removes "deprecated" messages
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+    </value>
+  </property>
+
+</configuration>
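
A sketch of the ${name} substitution log4j performs when it loads the properties above; the same mechanism is what lets a daemon override the defaults, for example with -Dhadoop.root.logger=DEBUG,console on its command line (the resolve helper below is illustrative, not log4j code):

import re

PATTERN = re.compile(r"\$\{([^}]+)\}")

def resolve(props, overrides):
    merged = dict(props, **overrides)
    def expand(value, depth=0):
        if depth > 10:  # guard against accidental cyclic references
            return value
        return PATTERN.sub(
            lambda m: expand(merged.get(m.group(1), m.group(0)), depth + 1), value)
    return {k: expand(v) for k, v in merged.items()}

props = {"hadoop.root.logger": "INFO,console",
         "log4j.rootLogger": "${hadoop.root.logger}, EventCounter"}
print(resolve(props, {"hadoop.root.logger": "DEBUG,console"})["log4j.rootLogger"])
# -> DEBUG,console, EventCounter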

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
new file mode 100644
index 0000000..e67ed9f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hdfs-site.xml
@@ -0,0 +1,430 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true">
+
+  <!-- file system properties -->
+
+  <property>
+    <name>dfs.namenode.name.dir</name>
+    <!-- cluster variant -->
+    <value>/hadoop/hdfs/namenode</value>
+    <description>Determines where on the local filesystem the DFS name node
+      should store the name table.  If this is a comma-delimited list
+      of directories then the name table is replicated in all of the
+      directories, for redundancy. </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.support.append</name>
+    <value>true</value>
+    <description>to enable dfs append</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>true</value>
+    <description>Whether to enable the WebHDFS feature</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.failed.volumes.tolerated</name>
+    <value>0</value>
+    <description> Number of failed disks a DataNode would tolerate before it stops offering service</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir</name>
+    <value>/hadoop/hdfs/data</value>
+    <description>Determines where on the local filesystem a DFS data node
+      should store its blocks.  If this is a comma-delimited
+      list of directories, then data will be stored in all named
+      directories, typically on different devices.
+      Directories that do not exist are ignored.
+    </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.hosts.exclude</name>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
+    <description>Names a file that contains a list of hosts that are
+      not permitted to connect to the namenode.  The full pathname of the
+      file must be specified.  If the value is empty, no hosts are
+      excluded.</description>
+  </property>
+
+  <!--
+    <property>
+      <name>dfs.hosts</name>
+      <value>/etc/hadoop/conf/dfs.include</value>
+      <description>Names a file that contains a list of hosts that are
+      permitted to connect to the namenode. The full pathname of the file
+      must be specified.  If the value is empty, all hosts are
+      permitted.</description>
+    </property>
+  -->
+
+  <property>
+    <name>dfs.namenode.checkpoint.dir</name>
+    <value>/hadoop/hdfs/namesecondary</value>
+    <description>Determines where on the local filesystem the DFS secondary
+      name node should store the temporary images to merge.
+      If this is a comma-delimited list of directories then the image is
+      replicated in all of the directories for redundancy.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.checkpoint.edits.dir</name>
+    <value>${dfs.namenode.checkpoint.dir}</value>
+    <description>Determines where on the local filesystem the DFS secondary
+      name node should store the temporary edits to merge.
+      If this is a comma-delimited list of directories then the edits are
+      replicated in all of the directories for redundancy.
+      The default value is the same as dfs.namenode.checkpoint.dir.
+    </description>
+  </property>
+
+
+  <property>
+    <name>dfs.namenode.checkpoint.period</name>
+    <value>21600</value>
+    <description>The number of seconds between two periodic checkpoints.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.checkpoint.txns</name>
+    <value>1000000</value>
+    <description>The Secondary NameNode or CheckpointNode will create a checkpoint
+      of the namespace every 'dfs.namenode.checkpoint.txns' transactions,
+      regardless of whether 'dfs.namenode.checkpoint.period' has expired.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.replication.max</name>
+    <value>50</value>
+    <description>Maximal block replication.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.replication</name>
+    <value>3</value>
+    <description>Default block replication.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.heartbeat.interval</name>
+    <value>3</value>
+    <description>Determines datanode heartbeat interval in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.safemode.threshold-pct</name>
+    <value>1.0f</value>
+    <description>
+      Specifies the percentage of blocks that should satisfy
+      the minimal replication requirement defined by dfs.namenode.replication.min.
+      Values less than or equal to 0 mean not to start in safe mode.
+      Values greater than 1 will make safe mode permanent.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.balance.bandwidthPerSec</name>
+    <value>6250000</value>
+    <description>
+      Specifies the maximum amount of bandwidth that each datanode
+      can utilize for the balancing purpose in terms of
+      the number of bytes per second.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.https.port</name>
+    <value>50470</value>
+    <description>
+      This property is used by HftpFileSystem.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:50010</value>
+    <description>
+      The datanode server address and port for data transfer.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:50075</value>
+    <description>
+      The datanode http server address and port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.https.address</name>
+    <value>0.0.0.0:50475</value>
+    <description>
+      The datanode https server address and port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.blocksize</name>
+    <value>134217728</value>
+    <description>The default block size for new files.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.http-address</name>
+    <value>localhost:50070</value>
+    <description>The address and the base port on which the dfs
+      namenode web UI will listen.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.du.reserved</name>
+    <!-- cluster variant -->
+    <value>1073741824</value>
+    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.ipc.address</name>
+    <value>0.0.0.0:8010</value>
+    <description>
+      The datanode ipc server address and port.
+      If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.blockreport.initialDelay</name>
+    <value>120</value>
+    <description>Delay for first block report in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>40</value>
+    <description>The number of server threads for the namenode.</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.max.transfer.threads</name>
+    <value>1024</value>
+    <description>Specifies the maximum number of threads to use for transferring data in and out of the datanode.</description>
+  </property>
+
+  <!-- Permissions configuration -->
+
+  <property>
+    <name>fs.permissions.umask-mode</name>
+    <value>022</value>
+    <description>
+      The octal umask used when creating files and directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions.enabled</name>
+    <value>true</value>
+    <description>
+      If "true", enable permission checking in HDFS.
+      If "false", permission checking is turned off,
+      but all other behavior is unchanged.
+      Switching from one parameter value to the other does not change the mode,
+      owner or group of files or directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions.superusergroup</name>
+    <value>hdfs</value>
+    <description>The name of the group of super-users.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>100</value>
+    <description>Added to grow the queue size so that more client connections are allowed</description>
+  </property>
+
+  <property>
+    <name>dfs.block.access.token.enable</name>
+    <value>true</value>
+    <description>
+      If "true", access tokens are used as capabilities for accessing datanodes.
+      If "false", no access tokens are checked on accessing datanodes.
+    </description>
+  </property>
+
+  <property>
+    <!-- cluster variant -->
+    <name>dfs.namenode.secondary.http-address</name>
+    <value>localhost:50090</value>
+    <description>Address of secondary namenode web server</description>
+  </property>
+
+
+  <property>
+    <name>dfs.namenode.https-address</name>
+    <value>localhost:50470</value>
+    <description>The https address where namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir.perm</name>
+    <value>750</value>
+    <description>The permissions that should be there on dfs.datanode.data.dir
+      directories. The datanode will not come up if the permissions are
+      different on existing dfs.datanode.data.dir directories. If the directories
+      don't exist, they will be created with this permission.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.accesstime.precision</name>
+    <value>0</value>
+    <description>The access time for an HDFS file is precise up to this value.
+      The default value is 1 hour. Setting a value of 0 disables
+      access times for HDFS.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.cluster.administrators</name>
+    <value> hdfs</value>
+    <description>ACL for who all can view the default servlets in the HDFS</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.avoid.read.stale.datanode</name>
+    <value>true</value>
+    <description>
+      Indicate whether or not to avoid reading from stale datanodes whose
+      heartbeat messages have not been received by the namenode for more than a
+      specified time interval.
+    </description>
+  </property>
+  <property>
+    <name>dfs.namenode.avoid.write.stale.datanode</name>
+    <value>true</value>
+    <description>
+      Indicate whether or not to avoid writing to stale datanodes whose
+      heartbeat messages have not been received by the namenode for more than a
+      specified time interval.
+    </description>
+  </property>
+  <property>
+    <name>dfs.namenode.write.stale.datanode.ratio</name>
+    <value>1.0f</value>
+    <description>When the ratio of the number of stale datanodes to total datanodes is greater
+      than this ratio, stop avoiding writing to stale nodes so as to prevent causing hotspots.
+    </description>
+  </property>
+  <property>
+    <name>dfs.namenode.stale.datanode.interval</name>
+    <value>30000</value>
+    <description>Datanode is stale after not getting a heartbeat in this interval in ms</description>
+  </property>
+
+  <property>
+    <name>dfs.journalnode.http-address</name>
+    <value>0.0.0.0:8480</value>
+    <description>The address and port the JournalNode web UI listens on.
+      If the port is 0 then the server will start on a free port. </description>
+  </property>
+
+  <property>
+    <name>dfs.journalnode.edits.dir</name>
+    <value>/grid/0/hdfs/journal</value>
+    <description>The path where the JournalNode daemon will store its local state. </description>
+  </property>
+
+  <!-- HDFS Short-Circuit Local Reads -->
+
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value>true</value>
+    <description>
+      This configuration parameter turns on short-circuit local reads.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value>/var/lib/hadoop-hdfs/dn_socket</value>
+    <description>
+      This is a path to a UNIX domain socket that will be used for communication between the DataNode and local HDFS clients.
+      If the string "_PORT" is present in this path, it will be replaced by the TCP port of the DataNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.client.read.shortcircuit.streams.cache.size</name>
+    <value>4096</value>
+    <description>
+      The DFSClient maintains a cache of recently opened file descriptors. This
+      parameter controls the size of that cache. Setting this higher will use
+      more file descriptors, but potentially provide better performance on
+      workloads involving lots of seeks.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.name.dir.restore</name>
+    <value>true</value>
+    <description>Set to true to enable NameNode to attempt recovering a previously failed dfs.namenode.name.dir.
+      When enabled, a recovery of any failed directory is attempted during checkpoint.</description>
+  </property>
+
+  <property>
+    <name>dfs.http.policy</name>
+    <value>HTTP_ONLY</value>
+    <description>
+      Decide if HTTPS (SSL) is supported on HDFS. This configures the HTTP endpoint for HDFS daemons.
+      The following values are supported: HTTP_ONLY (service is provided only on http), HTTPS_ONLY
+      (service is provided only on https), and HTTP_AND_HTTPS (service is provided on both http and https).
+    </description>
+  </property>
+
+</configuration>
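
One detail worth illustrating from the properties above: dfs.namenode.checkpoint.period and dfs.namenode.checkpoint.txns are independent triggers, and a checkpoint fires when either threshold is crossed. A minimal sketch of that interaction (not NameNode code):

CHECKPOINT_PERIOD_SECS = 21600   # dfs.namenode.checkpoint.period
CHECKPOINT_TXNS = 1000000        # dfs.namenode.checkpoint.txns

def should_checkpoint(secs_since_last, txns_since_last):
    # Either the period elapsing or the transaction count crossing the
    # threshold is sufficient to start a checkpoint.
    return (secs_since_last >= CHECKPOINT_PERIOD_SECS
            or txns_since_last >= CHECKPOINT_TXNS)

assert should_checkpoint(21600, 10)       # period expired
assert should_checkpoint(60, 1000000)     # txn threshold crossed first
assert not should_checkpoint(60, 10)      # neither condition met yet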

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml
new file mode 100644
index 0000000..93504a4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/metainfo.xml
@@ -0,0 +1,225 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <displayName>HDFS</displayName>
+      <comment>Apache Hadoop Distributed File System</comment>
+      <version>2.1.0.2.0</version>
+
+      <components>
+        <component>
+          <name>NAMENODE</name>
+          <displayName>NameNode</displayName>
+          <category>MASTER</category>
+          <cardinality>1-2</cardinality>
+          <commandScript>
+            <script>scripts/namenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/namenode.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+            <customCommand>
+              <name>REBALANCEHDFS</name>
+              <background>true</background>
+              <commandScript>
+                <script>scripts/namenode.py</script>
+                <scriptType>PYTHON</scriptType>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>DATANODE</name>
+          <displayName>DataNode</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/datanode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>SECONDARY_NAMENODE</name>
+          <displayName>SNameNode</displayName>
+          <!-- TODO:  cardinality is conditional on HA usage -->
+          <cardinality>1</cardinality>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/snamenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>HDFS_CLIENT</name>
+          <displayName>HDFS Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/hdfs_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>hdfs-site.xml</fileName>
+              <dictionaryName>hdfs-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>core-site.xml</fileName>
+              <dictionaryName>core-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>log4j.properties</fileName>
+              <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hadoop-env.sh</fileName>
+              <dictionaryName>hadoop-env</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+
+        <component>
+          <name>JOURNALNODE</name>
+          <displayName>JournalNode</displayName>
+          <category>SLAVE</category>
+          <cardinality>0+</cardinality>
+          <commandScript>
+            <script>scripts/journalnode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>ZKFC</name>
+          <displayName>ZKFailoverController</displayName>
+          <category>SLAVE</category>
+          <!-- TODO: cardinality is conditional on HA topology -->
+          <cardinality>0+</cardinality>
+          <commandScript>
+            <script>scripts/zkfc_slave.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hadoop</name>
+            </package>
+            <package>
+              <name>hadoop-lzo</name>
+            </package>
+          </packages>
+        </osSpecific>
+        
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>snappy</name>
+            </package>
+            <package>
+              <name>snappy-devel</name>
+            </package>
+            <package>
+              <name>lzo</name>
+            </package>
+            <package>
+              <name>hadoop-lzo-native</name>
+            </package>
+            <package>
+              <name>hadoop-libhdfs</name>
+            </package>
+            <package>
+              <name>ambari-log4j</name>
+            </package>
+          </packages>
+        </osSpecific>
+        
+        <osSpecific>
+          <osFamily>ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>libsnappy1</name>
+            </package>
+            <package>
+              <name>libsnappy-dev</name>
+            </package>
+            <package>
+              <name>liblzo2-2</name>
+            </package>
+            <package>
+              <name>hadoop-hdfs</name>
+            </package>
+            <package>
+              <name>libhdfs0</name>
+            </package>
+            <package>
+              <name>libhdfs0-dev</name>
+            </package>
+          </packages>
+        </osSpecific>
+            
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+      
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-env</config-type>
+        <config-type>hadoop-policy</config-type>
+        <config-type>hdfs-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>
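
A quick illustrative reader, not part of the Ambari test suite, that lists the components this metainfo.xml declares along with their cardinality and command script (the path below assumes a repository checkout):

import xml.etree.ElementTree as ET

def list_components(metainfo_path):
    root = ET.parse(metainfo_path).getroot()
    for component in root.iter("component"):
        print("%-20s cardinality=%-4s script=%s" % (
            component.findtext("name"),
            component.findtext("cardinality"),
            component.findtext("commandScript/script")))

list_components("ambari-server/src/main/resources/"
                "common-services/HDFS/2.1.0.2.0/metainfo.xml")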


[34/37] ambari git commit: AMBARI-8746: Common Services: Refactor HDP 2.0.6 HIVE, OOZIE services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/etc/hive-schema-0.12.0.oracle.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/etc/hive-schema-0.12.0.oracle.sql b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/etc/hive-schema-0.12.0.oracle.sql
new file mode 100644
index 0000000..812b897
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/etc/hive-schema-0.12.0.oracle.sql
@@ -0,0 +1,718 @@
+-- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE SEQUENCE_TABLE
+(
+   SEQUENCE_NAME VARCHAR2(255) NOT NULL,
+   NEXT_VAL NUMBER NOT NULL
+);
+
+ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME);
+
+-- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
+-- This table is required if datanucleus.autoStartMechanism=SchemaTable
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE NUCLEUS_TABLES
+(
+   CLASS_NAME VARCHAR2(128) NOT NULL,
+   TABLE_NAME VARCHAR2(128) NOT NULL,
+   TYPE VARCHAR2(4) NOT NULL,
+   OWNER VARCHAR2(2) NOT NULL,
+   VERSION VARCHAR2(20) NOT NULL,
+   INTERFACE_NAME VARCHAR2(255) NULL
+);
+
+ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME);
+
+-- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+CREATE TABLE PART_COL_PRIVS
+(
+    PART_COLUMN_GRANT_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PART_ID NUMBER NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    PART_COL_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
+
+-- Table CDS.
+CREATE TABLE CDS
+(
+    CD_ID NUMBER NOT NULL
+);
+
+ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+
+-- Table COLUMNS_V2 for join relationship
+CREATE TABLE COLUMNS_V2
+(
+    CD_ID NUMBER NOT NULL,
+    "COMMENT" VARCHAR2(256) NULL,
+    "COLUMN_NAME" VARCHAR2(128) NOT NULL,
+    TYPE_NAME VARCHAR2(4000) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
+
+-- Table PARTITION_KEY_VALS for join relationship
+CREATE TABLE PARTITION_KEY_VALS
+(
+    PART_ID NUMBER NOT NULL,
+    PART_KEY_VAL VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
+
+-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE TABLE DBS
+(
+    DB_ID NUMBER NOT NULL,
+    "DESC" VARCHAR2(4000) NULL,
+    DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
+    "NAME" VARCHAR2(128) NULL
+);
+
+ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+
+-- Table PARTITION_PARAMS for join relationship
+CREATE TABLE PARTITION_PARAMS
+(
+    PART_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
+
+-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+CREATE TABLE SERDES
+(
+    SERDE_ID NUMBER NOT NULL,
+    "NAME" VARCHAR2(128) NULL,
+    SLIB VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+
+-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+CREATE TABLE TYPES
+(
+    TYPES_ID NUMBER NOT NULL,
+    TYPE_NAME VARCHAR2(128) NULL,
+    TYPE1 VARCHAR2(767) NULL,
+    TYPE2 VARCHAR2(767) NULL
+);
+
+ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+
+-- Table PARTITION_KEYS for join relationship
+CREATE TABLE PARTITION_KEYS
+(
+    TBL_ID NUMBER NOT NULL,
+    PKEY_COMMENT VARCHAR2(4000) NULL,
+    PKEY_NAME VARCHAR2(128) NOT NULL,
+    PKEY_TYPE VARCHAR2(767) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
+
+-- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE TABLE ROLES
+(
+    ROLE_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    OWNER_NAME VARCHAR2(128) NULL,
+    ROLE_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
+
+-- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
+CREATE TABLE PARTITIONS
+(
+    PART_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    PART_NAME VARCHAR2(767) NULL,
+    SD_ID NUMBER NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
+
+-- Table INDEX_PARAMS for join relationship
+CREATE TABLE INDEX_PARAMS
+(
+    INDEX_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
+
+-- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+CREATE TABLE TBL_COL_PRIVS
+(
+    TBL_COLUMN_GRANT_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    TBL_COL_PRIV VARCHAR2(128) NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
+
+-- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
+CREATE TABLE IDXS
+(
+    INDEX_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DEFERRED_REBUILD NUMBER(1) NOT NULL CHECK (DEFERRED_REBUILD IN (1,0)),
+    INDEX_HANDLER_CLASS VARCHAR2(4000) NULL,
+    INDEX_NAME VARCHAR2(128) NULL,
+    INDEX_TBL_ID NUMBER NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    ORIG_TBL_ID NUMBER NULL,
+    SD_ID NUMBER NULL
+);
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
+
+-- Table BUCKETING_COLS for join relationship
+CREATE TABLE BUCKETING_COLS
+(
+    SD_ID NUMBER NOT NULL,
+    BUCKET_COL_NAME VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TYPE_FIELDS for join relationship
+CREATE TABLE TYPE_FIELDS
+(
+    TYPE_NAME NUMBER NOT NULL,
+    "COMMENT" VARCHAR2(256) NULL,
+    FIELD_NAME VARCHAR2(128) NOT NULL,
+    FIELD_TYPE VARCHAR2(767) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
+
+-- Table SD_PARAMS for join relationship
+CREATE TABLE SD_PARAMS
+(
+    SD_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
+
+-- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE TABLE GLOBAL_PRIVS
+(
+    USER_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    USER_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
+
+-- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+CREATE TABLE SDS
+(
+    SD_ID NUMBER NOT NULL,
+    CD_ID NUMBER NULL,
+    INPUT_FORMAT VARCHAR2(4000) NULL,
+    IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)),
+    LOCATION VARCHAR2(4000) NULL,
+    NUM_BUCKETS NUMBER (10) NOT NULL,
+    OUTPUT_FORMAT VARCHAR2(4000) NULL,
+    SERDE_ID NUMBER NULL,
+    IS_STOREDASSUBDIRECTORIES NUMBER(1) NOT NULL CHECK (IS_STOREDASSUBDIRECTORIES IN (1,0))
+);
+
+ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
+
+-- Table TABLE_PARAMS for join relationship
+CREATE TABLE TABLE_PARAMS
+(
+    TBL_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
+
+-- Table SORT_COLS for join relationship
+CREATE TABLE SORT_COLS
+(
+    SD_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(128) NULL,
+    "ORDER" NUMBER (10) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+CREATE TABLE TBL_PRIVS
+(
+    TBL_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    TBL_PRIV VARCHAR2(128) NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
+
+-- Table DATABASE_PARAMS for join relationship
+CREATE TABLE DATABASE_PARAMS
+(
+    DB_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(180) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
+
+-- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
+CREATE TABLE ROLE_MAP
+(
+    ROLE_GRANT_ID NUMBER NOT NULL,
+    ADD_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    ROLE_ID NUMBER NULL
+);
+
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
+
+-- Table SERDE_PARAMS for join relationship
+CREATE TABLE SERDE_PARAMS
+(
+    SERDE_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
+
+-- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+CREATE TABLE PART_PRIVS
+(
+    PART_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PART_ID NUMBER NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    PART_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
+
+-- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+CREATE TABLE DB_PRIVS
+(
+    DB_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DB_ID NUMBER NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    DB_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
+
+-- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
+CREATE TABLE TBLS
+(
+    TBL_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DB_ID NUMBER NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    OWNER VARCHAR2(767) NULL,
+    RETENTION NUMBER (10) NOT NULL,
+    SD_ID NUMBER NULL,
+    TBL_NAME VARCHAR2(128) NULL,
+    TBL_TYPE VARCHAR2(128) NULL,
+    VIEW_EXPANDED_TEXT CLOB NULL,
+    VIEW_ORIGINAL_TEXT CLOB NULL
+);
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
+
+-- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE TABLE PARTITION_EVENTS
+(
+    PART_NAME_ID NUMBER NOT NULL,
+    DB_NAME VARCHAR2(128) NULL,
+    EVENT_TIME NUMBER NOT NULL,
+    EVENT_TYPE NUMBER (10) NOT NULL,
+    PARTITION_NAME VARCHAR2(767) NULL,
+    TBL_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
+
+-- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
+CREATE TABLE SKEWED_STRING_LIST
+(
+    STRING_LIST_ID NUMBER NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
+
+CREATE TABLE SKEWED_STRING_LIST_VALUES
+(
+    STRING_LIST_ID NUMBER NOT NULL,
+    "STRING_LIST_VALUE" VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_COL_NAMES
+(
+    SD_ID NUMBER NOT NULL,
+    "SKEWED_COL_NAME" VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
+(
+    SD_ID NUMBER NOT NULL,
+    STRING_LIST_ID_KID NUMBER NOT NULL,
+    "LOCATION" VARCHAR2(4000) NULL
+);
+
+CREATE TABLE MASTER_KEYS
+(
+    KEY_ID NUMBER (10) NOT NULL,
+    MASTER_KEY VARCHAR2(767) NULL
+);
+
+CREATE TABLE DELEGATION_TOKENS
+(
+    TOKEN_IDENT VARCHAR2(767) NOT NULL,
+    TOKEN VARCHAR2(767) NULL
+);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_VALUES
+(
+    SD_ID_OID NUMBER NOT NULL,
+    STRING_LIST_ID_EID NUMBER NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+-- column statistics
+
+CREATE TABLE TAB_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL, 
+ COLUMN_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ TBL_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+CREATE TABLE VERSION (
+  VER_ID NUMBER NOT NULL,
+  SCHEMA_VERSION VARCHAR(127) NOT NULL,
+  VERSION_COMMENT VARCHAR(255)
+);
+ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID);
+
+CREATE TABLE PART_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL,
+ PARTITION_NAME VARCHAR2(767) NOT NULL,
+ COLUMN_NAME VARCHAR2(128) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ PART_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
+
+CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
+
+-- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
+
+CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table COLUMNS_V2
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
+
+
+-- Constraints for table PARTITION_KEY_VALS
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
+
+
+-- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME");
+
+
+-- Constraints for table PARTITION_PARAMS
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
+
+
+-- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+
+-- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
+CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME);
+
+
+-- Constraints for table PARTITION_KEYS
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
+
+
+-- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
+
+
+-- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
+
+CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
+
+CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
+
+
+-- Constraints for table INDEX_PARAMS
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
+
+
+-- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
+
+
+-- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
+
+CREATE INDEX IDXS_N50 ON IDXS (INDEX_TBL_ID);
+
+CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
+
+CREATE INDEX IDXS_N49 ON IDXS (ORIG_TBL_ID);
+
+
+-- Constraints for table BUCKETING_COLS
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
+
+
+-- Constraints for table TYPE_FIELDS
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
+
+
+-- Constraints for table SD_PARAMS
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
+
+
+-- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
+CREATE INDEX SDS_N50 ON SDS (CD_ID);
+
+
+-- Constraints for table TABLE_PARAMS
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
+
+
+-- Constraints for table SORT_COLS
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
+
+
+-- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
+
+CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table DATABASE_PARAMS
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
+
+
+-- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
+
+CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SERDE_PARAMS
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
+
+
+-- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
+
+
+-- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
+
+
+-- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
+
+CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
+
+CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
+
+
+-- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
+
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.12.0', 'Hive release version 0.12.0');
+

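A note on the DDL above: every foreign key in this schema is created INITIALLY DEFERRED, so referential checks are postponed until the enclosing transaction commits, and the metastore may write parent and child rows in whichever order is convenient. A minimal sketch of the effect, using the PARTITIONS/PARTITION_PARAMS tables above (the key value 100 is made up, and BEGIN/COMMIT syntax varies by engine):

  BEGIN;
  -- Child row first: PART_ID 100 does not exist yet, but the deferred
  -- PARTITION_PARAMS_FK1 constraint is not checked until COMMIT.
  INSERT INTO PARTITION_PARAMS (PART_ID, PARAM_KEY, PARAM_VALUE)
    VALUES (100, 'transient_lastDdlTime', '0');
  -- Parent row arrives later in the same transaction.
  INSERT INTO PARTITIONS (PART_ID, CREATE_TIME, LAST_ACCESS_TIME, SD_ID, TBL_ID)
    VALUES (100, 0, 0, NULL, NULL);
  COMMIT;  -- all deferred foreign keys are verified here
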
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/etc/hive-schema-0.12.0.postgres.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/etc/hive-schema-0.12.0.postgres.sql b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/etc/hive-schema-0.12.0.postgres.sql
new file mode 100644
index 0000000..bc6486b
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/etc/hive-schema-0.12.0.postgres.sql
@@ -0,0 +1,1406 @@
+--
+-- PostgreSQL database dump
+--
+
+SET statement_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = off;
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+SET escape_string_warning = off;
+
+SET search_path = public, pg_catalog;
+
+SET default_tablespace = '';
+
+SET default_with_oids = false;
+
+--
+-- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "BUCKETING_COLS" (
+    "SD_ID" bigint NOT NULL,
+    "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "CDS" (
+    "CD_ID" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_OLD; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_OLD" (
+    "SD_ID" bigint NOT NULL,
+    "COMMENT" character varying(256) DEFAULT NULL::character varying,
+    "COLUMN_NAME" character varying(128) NOT NULL,
+    "TYPE_NAME" character varying(4000) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_V2" (
+    "CD_ID" bigint NOT NULL,
+    "COMMENT" character varying(4000),
+    "COLUMN_NAME" character varying(128) NOT NULL,
+    "TYPE_NAME" character varying(4000),
+    "INTEGER_IDX" integer NOT NULL
+);
+
+
+--
+-- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DATABASE_PARAMS" (
+    "DB_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(180) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DBS" (
+    "DB_ID" bigint NOT NULL,
+    "DESC" character varying(4000) DEFAULT NULL::character varying,
+    "DB_LOCATION_URI" character varying(4000) NOT NULL,
+    "NAME" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DB_PRIVS" (
+    "DB_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DB_ID" bigint,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "DB_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "GLOBAL_PRIVS" (
+    "USER_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "USER_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "IDXS" (
+    "INDEX_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DEFERRED_REBUILD" boolean NOT NULL,
+    "INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character varying,
+    "INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
+    "INDEX_TBL_ID" bigint,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "ORIG_TBL_ID" bigint,
+    "SD_ID" bigint
+);
+
+
+--
+-- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "INDEX_PARAMS" (
+    "INDEX_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "NUCLEUS_TABLES" (
+    "CLASS_NAME" character varying(128) NOT NULL,
+    "TABLE_NAME" character varying(128) NOT NULL,
+    "TYPE" character varying(4) NOT NULL,
+    "OWNER" character varying(2) NOT NULL,
+    "VERSION" character varying(20) NOT NULL,
+    "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITIONS" (
+    "PART_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "PART_NAME" character varying(767) DEFAULT NULL::character varying,
+    "SD_ID" bigint,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_EVENTS" (
+    "PART_NAME_ID" bigint NOT NULL,
+    "DB_NAME" character varying(128),
+    "EVENT_TIME" bigint NOT NULL,
+    "EVENT_TYPE" integer NOT NULL,
+    "PARTITION_NAME" character varying(767),
+    "TBL_NAME" character varying(128)
+);
+
+
+--
+-- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEYS" (
+    "TBL_ID" bigint NOT NULL,
+    "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying,
+    "PKEY_NAME" character varying(128) NOT NULL,
+    "PKEY_TYPE" character varying(767) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEY_VALS" (
+    "PART_ID" bigint NOT NULL,
+    "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_PARAMS" (
+    "PART_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_PRIVS" (
+    "PART_COLUMN_GRANT_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_ID" bigint,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_PRIVS" (
+    "PART_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_ID" bigint,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLES" (
+    "ROLE_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+    "ROLE_NAME" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLE_MAP" (
+    "ROLE_GRANT_ID" bigint NOT NULL,
+    "ADD_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "ROLE_ID" bigint
+);
+
+
+--
+-- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SDS" (
+    "SD_ID" bigint NOT NULL,
+    "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+    "IS_COMPRESSED" boolean NOT NULL,
+    "LOCATION" character varying(4000) DEFAULT NULL::character varying,
+    "NUM_BUCKETS" bigint NOT NULL,
+    "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+    "SERDE_ID" bigint,
+    "CD_ID" bigint,
+    "IS_STOREDASSUBDIRECTORIES" boolean NOT NULL
+);
+
+
+--
+-- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SD_PARAMS" (
+    "SD_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SEQUENCE_TABLE" (
+    "SEQUENCE_NAME" character varying(255) NOT NULL,
+    "NEXT_VAL" bigint NOT NULL
+);
+
+
+--
+-- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDES" (
+    "SERDE_ID" bigint NOT NULL,
+    "NAME" character varying(128) DEFAULT NULL::character varying,
+    "SLIB" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDE_PARAMS" (
+    "SERDE_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SORT_COLS" (
+    "SD_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "ORDER" bigint NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TABLE_PARAMS" (
+    "TBL_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBLS" (
+    "TBL_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DB_ID" bigint,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "OWNER" character varying(767) DEFAULT NULL::character varying,
+    "RETENTION" bigint NOT NULL,
+    "SD_ID" bigint,
+    "TBL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "VIEW_EXPANDED_TEXT" text,
+    "VIEW_ORIGINAL_TEXT" text
+);
+
+
+--
+-- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_COL_PRIVS" (
+    "TBL_COLUMN_GRANT_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_PRIVS" (
+    "TBL_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "TBL_PRIV" character varying(128) DEFAULT NULL::character varying,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPES" (
+    "TYPES_ID" bigint NOT NULL,
+    "TYPE_NAME" character varying(128) DEFAULT NULL::character varying,
+    "TYPE1" character varying(767) DEFAULT NULL::character varying,
+    "TYPE2" character varying(767) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPE_FIELDS" (
+    "TYPE_NAME" bigint NOT NULL,
+    "COMMENT" character varying(256) DEFAULT NULL::character varying,
+    "FIELD_NAME" character varying(128) NOT NULL,
+    "FIELD_TYPE" character varying(767) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST" (
+    "STRING_LIST_ID" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST_VALUES" (
+    "STRING_LIST_ID" bigint NOT NULL,
+    "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_NAMES" (
+    "SD_ID" bigint NOT NULL,
+    "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" (
+    "SD_ID" bigint NOT NULL,
+    "STRING_LIST_ID_KID" bigint NOT NULL,
+    "LOCATION" character varying(4000) DEFAULT NULL::character varying
+);
+
+CREATE TABLE "SKEWED_VALUES" (
+    "SD_ID_OID" bigint NOT NULL,
+    "STRING_LIST_ID_EID" bigint NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: MASTER_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "MASTER_KEYS"
+(
+    "KEY_ID" SERIAL,
+    "MASTER_KEY" varchar(767) NULL,
+    PRIMARY KEY ("KEY_ID")
+);
+
+--
+-- Name: DELEGATION_TOKENS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DELEGATION_TOKENS"
+(
+    "TOKEN_IDENT" varchar(767) NOT NULL,
+    "TOKEN" varchar(767) NULL,
+    PRIMARY KEY ("TOKEN_IDENT")
+);
+
+--
+-- Name: TAB_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TAB_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "TBL_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Name: VERSION; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "VERSION" (
+  "VER_ID" bigint,
+  "SCHEMA_VERSION" character varying(127) NOT NULL,
+  "VERSION_COMMENT" character varying(255) NOT NULL
+);
+
+--
+-- Name: PART_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "PART_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+    ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "CDS"
+    ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID");
+
+
+--
+-- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+    ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
+
+
+--
+-- Name: COLUMNS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "COLUMNS_OLD"
+    ADD CONSTRAINT "COLUMNS_pkey" PRIMARY KEY ("SD_ID", "COLUMN_NAME");
+
+
+--
+-- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+    ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY");
+
+
+--
+-- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+    ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID");
+
+
+--
+-- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+    ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+    ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID");
+
+
+--
+-- Name: IDXS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_pkey" PRIMARY KEY ("INDEX_ID");
+
+
+--
+-- Name: INDEX_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+    ADD CONSTRAINT "INDEX_PARAMS_pkey" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
+
+
+--
+-- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "NUCLEUS_TABLES"
+    ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME");
+
+
+--
+-- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID");
+
+
+--
+-- Name: PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_EVENTS"
+    ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID");
+
+
+--
+-- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+    ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
+
+
+--
+-- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+    ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
+
+
+--
+-- Name: PARTITION_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+    ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY");
+
+
+--
+-- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+    ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
+
+
+--
+-- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+    ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID");
+
+
+--
+-- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+    ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME");
+
+
+--
+-- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+    ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID");
+
+
+--
+-- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY ("ROLE_GRANT_ID");
+
+
+--
+-- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID");
+
+
+--
+-- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+    ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY");
+
+
+--
+-- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SEQUENCE_TABLE"
+    ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME");
+
+
+--
+-- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDES"
+    ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID");
+
+
+--
+-- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+    ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
+
+
+--
+-- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+    ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+    ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
+
+
+--
+-- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+    ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
+
+
+--
+-- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+    ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID");
+
+
+--
+-- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+    ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID");
+
+
+--
+-- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+    ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+    ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
+
+--
+-- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: UNIQUEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "UNIQUEINDEX" UNIQUE ("INDEX_NAME", "ORIG_TBL_ID");
+
+
+--
+-- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID");
+
+
+--
+-- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID");
+
+
+--
+-- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+    ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME");
+
+
+--
+-- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+    ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME");
+
+
+--
+-- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: COLUMNS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "COLUMNS_N49" ON "COLUMNS_OLD" USING btree ("SD_ID");
+
+
+--
+-- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID");
+
+
+--
+-- Name: IDXS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N49" ON "IDXS" USING btree ("ORIG_TBL_ID");
+
+
+--
+-- Name: IDXS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N50" ON "IDXS" USING btree ("INDEX_TBL_ID");
+
+
+--
+-- Name: IDXS_N51; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N51" ON "IDXS" USING btree ("SD_ID");
+
+
+--
+-- Name: INDEX_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "INDEX_PARAMS_N49" ON "INDEX_PARAMS" USING btree ("INDEX_ID");
+
+
+--
+-- Name: PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME");
+
+
+--
+-- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID");
+
+
+--
+-- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITION_KEY_VALS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID");
+
+
+--
+-- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID");
+
+
+--
+-- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID");
+
+
+--
+-- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME");
+
+--
+-- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID");
+
+--
+-- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
+
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+    ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+    ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: COLUMNS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "COLUMNS_OLD"
+    ADD CONSTRAINT "COLUMNS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+    ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+    ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_INDEX_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_INDEX_TBL_ID_fkey" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_ORIG_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_ORIG_TBL_ID_fkey" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: INDEX_PARAMS_INDEX_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+    ADD CONSTRAINT "INDEX_PARAMS_INDEX_ID_fkey" FOREIGN KEY ("INDEX_ID") REFERENCES "IDXS"("INDEX_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+    ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+    ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+    ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+    ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+    ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+    ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+    ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+    ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+    ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+    ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+    ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+    ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE;
+
+--
+-- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
+
+--
+-- Name: public; Type: ACL; Schema: -; Owner: hiveuser
+--
+
+REVOKE ALL ON SCHEMA public FROM PUBLIC;
+GRANT ALL ON SCHEMA public TO PUBLIC;
+
+
+INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '0.12.0', 'Hive release version 0.12.0');
+--
+-- PostgreSQL database dump complete
+--
+

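In contrast to the schema above, this PostgreSQL dump declares its foreign keys DEFERRABLE without INITIALLY DEFERRED, so they are enforced statement-by-statement unless a session explicitly defers them. A minimal sketch of opting in for a single transaction (SET CONSTRAINTS is standard PostgreSQL; the row values are invented):

  BEGIN;
  SET CONSTRAINTS ALL DEFERRED;  -- postpone every DEFERRABLE check to COMMIT
  INSERT INTO "TBLS" ("TBL_ID", "CREATE_TIME", "DB_ID", "LAST_ACCESS_TIME", "RETENTION")
    VALUES (1, 0, 1, 0, 0);      -- references DB_ID 1 before it exists
  INSERT INTO "DBS" ("DB_ID", "DB_LOCATION_URI", "NAME")
    VALUES (1, 'hdfs:///apps/hive/warehouse', 'default');
  COMMIT;                        -- "TBLS_DB_ID_fkey" is checked here

The final INSERT into "VERSION" serves the same purpose as in the schema above: tools can read the stored version back (SELECT "SCHEMA_VERSION" FROM "VERSION";) to confirm the metastore is at 0.12.0 before attempting an upgrade.
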
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/metainfo.xml
new file mode 100644
index 0000000..26edb84
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/metainfo.xml
@@ -0,0 +1,294 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HIVE</name>
+      <displayName>Hive</displayName>
+      <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
+      <version>0.12.0.2.0</version>
+      <components>
+
+        <component>
+          <name>HIVE_METASTORE</name>
+          <displayName>Hive Metastore</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <clientsToUpdateConfigs></clientsToUpdateConfigs>
+          <auto-deploy>
+            <enabled>true</enabled>
+            <co-locate>HIVE/HIVE_SERVER</co-locate>
+          </auto-deploy>
+          <commandScript>
+            <script>scripts/hive_metastore.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configuration-dependencies>
+            <config-type>hive-site</config-type>
+          </configuration-dependencies>
+        </component>
+
+        <component>
+          <name>HIVE_SERVER</name>
+          <displayName>HiveServer2</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <clientsToUpdateConfigs></clientsToUpdateConfigs>
+          <dependencies>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HIVE/HIVE_SERVER</co-locate>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/hive_server.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configuration-dependencies>
+            <config-type>hive-site</config-type>
+          </configuration-dependencies>
+        </component>
+        <component>
+          <name>WEBHCAT_SERVER</name>
+          <displayName>WebHCat Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <clientsToUpdateConfigs>
+            <client>HCAT</client>
+          </clientsToUpdateConfigs>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HIVE/WEBHCAT_SERVER</co-locate>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>PIG/PIG</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/webhcat_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configuration-dependencies>
+            <config-type>hive-site</config-type>
+          </configuration-dependencies>
+        </component>
+        <component>
+          <name>MYSQL_SERVER</name>
+          <displayName>MySQL Server</displayName>
+          <category>MASTER</category>
+          <cardinality>0-1</cardinality>
+          <clientsToUpdateConfigs></clientsToUpdateConfigs>
+          <commandScript>
+            <script>scripts/mysql_server.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>CLEAN</name>
+              <commandScript>
+                <script>scripts/mysql_server.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>HIVE_CLIENT</name>
+          <displayName>Hive Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/hive_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>hive-site.xml</fileName>
+              <dictionaryName>hive-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hive-env.sh</fileName>
+              <dictionaryName>hive-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hive-log4j.properties</fileName>
+              <dictionaryName>hive-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hive-exec-log4j.properties</fileName>
+              <dictionaryName>hive-exec-log4j</dictionaryName>
+            </configFile>
+          </configFiles>
+          <configuration-dependencies>
+            <config-type>hive-site</config-type>
+          </configuration-dependencies>
+        </component>
+        <component>
+          <name>HCAT</name>
+          <displayName>HCat Client</displayName>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/hcat_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>env</type>
+              <fileName>hcat-env.sh</fileName>
+              <dictionaryName>hcat-env</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+        <configuration-dependencies>
+          <config-type>hive-site</config-type>
+        </configuration-dependencies>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hive</name>
+            </package>
+            <package>
+              <name>hcatalog</name>
+            </package>
+            <package>
+              <name>webhcat-tar-hive</name>
+            </package>
+            <package>
+              <name>webhcat-tar-pig</name>
+            </package>
+            <package>
+              <name>mysql-connector-java</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>mysql</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>mysql-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>suse11</osFamily>
+          <packages>
+            <package>
+              <name>mysql-client</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+        <service>YARN</service>
+        <service>TEZ</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>hive-log4j</config-type>
+        <config-type>hive-exec-log4j</config-type>
+        <config-type>hive-env</config-type>
+        <config-type>webhcat-site</config-type>
+        <config-type>webhcat-env</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts.json b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts.json
new file mode 100644
index 0000000..3762492
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts.json
@@ -0,0 +1,60 @@
+{
+  "HIVE": {
+    "service": [],
+    "HIVE_METASTORE": [
+      {
+        "name": "hive_metastore_process",
+        "label": "Hive Metastore Process",
+        "description": "This host-level alert is triggered if the Hive Metastore process cannot be determined to be up and listening on the network.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "PORT",
+          "uri": "{{hive-site/hive.metastore.uris}}",
+          "default_port": 9083,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ],
+    "HIVE_SERVER": [
+      {
+        "name": "hive_server_process",
+        "label": "HiveServer2 Process",
+        "description": "This host-level alert is triggered if the HiveServer cannot be determined to be up and responding to client requests.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HDP/2.0.6/services/HIVE/package/files/alert_hive_thrift_port.py"
+        }
+      }
+    ],
+    "WEBHCAT_SERVER": [
+      {
+        "name": "hive_webhcat_server_status",
+        "label": "WebHCat Server Status",
+        "description": "This host-level alert is triggered if the templeton server status is not healthy.",
+        "interval": 1,
+        "scope": "ANY",
+        "enabled": true,
+        "source": {
+          "type": "SCRIPT",
+          "path": "HDP/2.0.6/services/HIVE/package/files/alert_webhcat_server.py"
+        }
+      }    
+    ]
+  }
+}
\ No newline at end of file
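
The HIVE_METASTORE entry above uses a declarative PORT source: the agent
times a TCP connect to the metastore URI (default port 9083) and maps the
latency onto the warning (1.5s) and critical (5.0s) thresholds. A rough,
hypothetical re-creation of those semantics (not Ambari's actual alert
runner):

    import socket
    import time

    def check_port(host, port=9083, warning=1.5, critical=5.0):
        # time a TCP connect and map the latency onto OK/WARNING/CRITICAL
        start = time.time()
        try:
            sock = socket.create_connection((host, port), timeout=critical)
            sock.close()
        except (socket.error, socket.timeout) as err:
            return "CRITICAL", "Connection failed: %s to %s:%s" % (err, host, port)
        elapsed = time.time() - start
        state = "WARNING" if elapsed >= warning else "OK"
        return state, "TCP OK - %.3fs response on port %s" % (elapsed, port)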

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/hcat-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/hcat-env.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/hcat-env.xml
new file mode 100644
index 0000000..91b402b
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/hcat-env.xml
@@ -0,0 +1,57 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- hcat-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for the hcat-env.sh file</description>
+    <value>
+      # Licensed to the Apache Software Foundation (ASF) under one
+      # or more contributor license agreements. See the NOTICE file
+      # distributed with this work for additional information
+      # regarding copyright ownership. The ASF licenses this file
+      # to you under the Apache License, Version 2.0 (the
+      # "License"); you may not use this file except in compliance
+      # with the License. You may obtain a copy of the License at
+      #
+      # http://www.apache.org/licenses/LICENSE-2.0
+      #
+      # Unless required by applicable law or agreed to in writing, software
+      # distributed under the License is distributed on an "AS IS" BASIS,
+      # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+      # See the License for the specific language governing permissions and
+      # limitations under the License.
+
+      JAVA_HOME={{java64_home}}
+      HCAT_PID_DIR={{hcat_pid_dir}}/
+      HCAT_LOG_DIR={{hcat_log_dir}}/
+      HCAT_CONF_DIR={{hcat_conf_dir}}
+      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+      #DBROOT is the path where the connector jars are downloaded
+      DBROOT={{hcat_dbroot}}
+      USER={{hcat_user}}
+      METASTORE_PORT={{hive_metastore_port}}
+    </value>
+  </property>
+  
+</configuration>
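
The value above is a jinja template whose {{...}} placeholders are filled in
from Ambari params at deploy time (the scripts in this commit render such
content via InlineTemplate). A tiny sketch of the substitution using plain
jinja2, with hypothetical parameter values:

    from jinja2 import Template

    snippet = "JAVA_HOME={{ java64_home }}\nMETASTORE_PORT={{ hive_metastore_port }}"
    print(Template(snippet).render(java64_home="/usr/jdk64/jdk1.7.0_67",
                                   hive_metastore_port=9083))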

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/hive-env.xml b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/hive-env.xml
new file mode 100644
index 0000000..52cfa10
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/configuration/hive-env.xml
@@ -0,0 +1,134 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hive_database_type</name>
+    <value>mysql</value>
+    <description>Default HIVE DB type.</description>
+  </property>
+  <property>
+    <name>hive_database</name>
+    <value>New MySQL Database</value>
+    <description>
+      Property that determines whether the HIVE DB is managed by Ambari.
+    </description>
+  </property>
+  <property>
+    <name>hive_ambari_database</name>
+    <value>MySQL</value>
+    <description>Database type.</description>
+  </property>
+  <property>
+    <name>hive_database_name</name>
+    <value>hive</value>
+    <description>Database name.</description>
+  </property>
+  <property>
+    <name>hive_dbroot</name>
+    <value>/usr/lib/hive/lib/</value>
+    <description>Hive DB Directory.</description>
+  </property>
+  <property>
+    <name>hive_log_dir</name>
+    <value>/var/log/hive</value>
+    <description>Directory for Hive Log files.</description>
+  </property>
+  <property>
+    <name>hive_pid_dir</name>
+    <value>/var/run/hive</value>
+    <description>Hive PID Dir.</description>
+  </property>
+  <property>
+    <name>hive_user</name>
+    <value>hive</value>
+    <property-type>USER</property-type>
+    <description>Hive User.</description>
+  </property>
+
+  <!--HCAT-->
+
+  <property>
+    <name>hcat_log_dir</name>
+    <value>/var/log/webhcat</value>
+    <description>WebHCat Log Dir.</description>
+  </property>
+  <property>
+    <name>hcat_pid_dir</name>
+    <value>/var/run/webhcat</value>
+    <description>WebHCat Pid Dir.</description>
+  </property>
+  <property>
+    <name>hcat_user</name>
+    <value>hcat</value>
+    <property-type>USER</property-type>
+    <description>HCat User.</description>
+  </property>
+  <property>
+    <name>webhcat_user</name>
+    <value>hcat</value>
+    <property-type>USER</property-type>
+    <description>WebHCat User.</description>
+  </property>
+  
+  <!-- hive-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for the hive-env.sh file</description>
+    <value>
+ if [ "$SERVICE" = "cli" ]; then
+   if [ -z "$DEBUG" ]; then
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+   else
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+   fi
+ fi
+
+# The heap size of the JVM started by the hive shell script can be controlled via:
+
+export HADOOP_HEAPSIZE="{{hive_heapsize}}"
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+
+# A larger heap size may be required when running queries over a large number of files or partitions.
+# By default, hive shell scripts use a heap size of 256 (MB). A larger heap size would also be
+# appropriate for hive server (hwi etc).
+
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR={{hive_config_dir}}
+
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
+  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
+elif [ -d "/usr/lib/hive-hcatalog/" ]; then
+  export HIVE_AUX_JARS_PATH=/usr/lib/hive-hcatalog/share/hcatalog/hive-hcatalog-core-*.jar
+else
+  export HIVE_AUX_JARS_PATH=/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar
+fi
+export METASTORE_PORT={{hive_metastore_port}}
+    </value>
+  </property>
+  
+</configuration>


[15/37] ambari git commit: AMBARI-8745: Common Services: Refactor HDP2.0.6 FLUME, GANGLIA, HBASE services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/files/draining_servers.rb
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/files/draining_servers.rb b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/files/draining_servers.rb
deleted file mode 100644
index 5bcb5b6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/files/draining_servers.rb
+++ /dev/null
@@ -1,164 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Add or remove servers from draining mode via zookeeper 
-
-require 'optparse'
-include Java
-
-import org.apache.hadoop.hbase.HBaseConfiguration
-import org.apache.hadoop.hbase.client.HBaseAdmin
-import org.apache.hadoop.hbase.zookeeper.ZKUtil
-import org.apache.commons.logging.Log
-import org.apache.commons.logging.LogFactory
-
-# Name of this script
-NAME = "draining_servers"
-
-# Do command-line parsing
-options = {}
-optparse = OptionParser.new do |opts|
-  opts.banner = "Usage: ./hbase org.jruby.Main #{NAME}.rb [options] add|remove|list <hostname>|<host:port>|<servername> ..."
-  opts.separator 'Add, remove, or list servers in draining mode. Can accept either a hostname to drain all region servers ' +
-                 'on that host, a host:port pair, or a host,port,startCode triplet. More than one server can be given, separated by spaces'
-  opts.on('-h', '--help', 'Display usage information') do
-    puts opts
-    exit
-  end
-  options[:debug] = false
-  opts.on('-d', '--debug', 'Display extra debug logging') do
-    options[:debug] = true
-  end
-end
-optparse.parse!
-
-# Return an array of servernames, where each servername is the comma-delimited
-# triplet hostname,port,startcode
-def getServers(admin)
-  serverInfos = admin.getClusterStatus().getServerInfo()
-  servers = []
-  for server in serverInfos
-    servers << server.getServerName()
-  end
-  return servers
-end
-
-def getServerNames(hostOrServers, config)
-  ret = []
-  
-  for hostOrServer in hostOrServers
-    # check whether it is already serverName. No need to connect to cluster
-    parts = hostOrServer.split(',')
-    if parts.size() == 3
-      ret << hostOrServer
-    else 
-      admin = HBaseAdmin.new(config) if not admin
-      servers = getServers(admin)
-
-      hostOrServer = hostOrServer.gsub(/:/, ",")
-      for server in servers 
-        ret << server if server.start_with?(hostOrServer)
-      end
-    end
-  end
-  
-  admin.close() if admin
-  return ret
-end
-
-def addServers(options, hostOrServers)
-  config = HBaseConfiguration.create()
-  servers = getServerNames(hostOrServers, config)
-  
-  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
-  parentZnode = zkw.drainingZNode
-  
-  begin
-    for server in servers
-      node = ZKUtil.joinZNode(parentZnode, server)
-      ZKUtil.createAndFailSilent(zkw, node)
-    end
-  ensure
-    zkw.close()
-  end
-end
-
-def removeServers(options, hostOrServers)
-  config = HBaseConfiguration.create()
-  servers = getServerNames(hostOrServers, config)
-  
-  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
-  parentZnode = zkw.drainingZNode
-  
-  begin
-    for server in servers
-      node = ZKUtil.joinZNode(parentZnode, server)
-      ZKUtil.deleteNodeFailSilent(zkw, node)
-    end
-  ensure
-    zkw.close()
-  end
-end
-
-# list servers in draining mode
-def listServers(options)
-  config = HBaseConfiguration.create()
-  
-  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
-  parentZnode = zkw.drainingZNode
-
-  servers = ZKUtil.listChildrenNoWatch(zkw, parentZnode)
-  servers.each {|server| puts server}
-end
-
-hostOrServers = ARGV[1..ARGV.size()]
-
-# Create a logger and disable the annoying DEBUG-level client logging
-def configureLogging(options)
-  apacheLogger = LogFactory.getLog(NAME)
-  # Configure log4j to not spew so much
-  unless (options[:debug]) 
-    logger = org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase")
-    logger.setLevel(org.apache.log4j.Level::WARN)
-    logger = org.apache.log4j.Logger.getLogger("org.apache.zookeeper")
-    logger.setLevel(org.apache.log4j.Level::WARN)
-  end
-  return apacheLogger
-end
-
-# Create a logger and save it to ruby global
-$LOG = configureLogging(options)
-case ARGV[0]
-  when 'add'
-    if ARGV.length < 2
-      puts optparse
-      exit 1
-    end
-    addServers(options, hostOrServers)
-  when 'remove'
-    if ARGV.length < 2
-      puts optparse
-      exit 1
-    end
-    removeServers(options, hostOrServers)
-  when 'list'
-    listServers(options)
-  else
-    puts optparse
-    exit 3
-end

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/files/hbaseSmokeVerify.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/files/hbaseSmokeVerify.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/files/hbaseSmokeVerify.sh
deleted file mode 100644
index 5c320c0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/files/hbaseSmokeVerify.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-conf_dir=$1
-data=$2
-hbase_cmd=$3
-echo "scan 'ambarismoketest'" | $hbase_cmd --config $conf_dir shell > /tmp/hbase_chk_verify
-cat /tmp/hbase_chk_verify
-echo "Looking for $data"
-grep -q $data /tmp/hbase_chk_verify
-if [ "$?" -ne 0 ]
-then
-  exit 1
-fi
-
-grep -q '1 row(s)' /tmp/hbase_chk_verify
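
As wired up by service_check.py in this stack, this script is invoked as
"<exec_tmp_dir>/hbaseSmokeVerify.sh <conf_dir> <service_check_data> <hbase_cmd>"
(for example /tmp/hbaseSmokeVerify.sh /etc/hbase/conf <unique id> /usr/lib/hbase/bin/hbase);
it scans the ambarismoketest table and exits non-zero unless the expected
row is found.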

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/__init__.py
deleted file mode 100644
index 5561e10..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/functions.py
deleted file mode 100644
index e6e7fb9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/functions.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-import re
-import math
-import datetime
-
-from resource_management.core.shell import checked_call
-
-def calc_xmn_from_xms(heapsize_str, xmn_percent, xmn_max):
-  """
-  @param heapsize_str: str (e.g '1000m')
-  @param xmn_percent: float (e.g 0.2)
-  @param xmn_max: integer (e.g 512)
-  """
-  heapsize = int(re.search('\d+',heapsize_str).group(0))
-  heapsize_unit = re.search('\D+',heapsize_str).group(0)
-  xmn_val = int(math.floor(heapsize*xmn_percent))
-  xmn_val -= xmn_val % 8
-  
-  result_xmn_val = xmn_max if xmn_val > xmn_max else xmn_val
-  return str(result_xmn_val) + heapsize_unit
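
For reference, the rounding above behaves like this (illustrative inputs):

    calc_xmn_from_xms('1024m', 0.2, 512)  # floor(1024*0.2)=204 -> 200, nearest lower multiple of 8 -> '200m'
    calc_xmn_from_xms('8192m', 0.2, 512)  # 1638 -> 1632, then capped at xmn_max -> '512m'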

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase.py
deleted file mode 100644
index ea99288..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase.py
+++ /dev/null
@@ -1,158 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-
-from resource_management import *
-import sys
-
-def hbase(name=None # 'master' or 'regionserver' or 'client'
-              ):
-  import params
-
-  Directory( params.hbase_conf_dir_prefix,
-      mode=0755
-  )
-
-  Directory( params.hbase_conf_dir,
-      owner = params.hbase_user,
-      group = params.user_group,
-      recursive = True
-  )
-
-  Directory (params.tmp_dir,
-             owner = params.hbase_user,
-             mode=0775,
-             recursive = True,
-             recursive_permission = True
-  )
-
-  Directory (params.local_dir,
-             owner = params.hbase_user,
-             group = params.user_group,
-             mode=0775,
-             recursive = True
-  )
-
-  Directory (os.path.join(params.local_dir, "jars"),
-             owner = params.hbase_user,
-             group = params.user_group,
-             mode=0775,
-             recursive = True
-  )
-
-  XmlConfig( "hbase-site.xml",
-            conf_dir = params.hbase_conf_dir,
-            configurations = params.config['configurations']['hbase-site'],
-            configuration_attributes=params.config['configuration_attributes']['hbase-site'],
-            owner = params.hbase_user,
-            group = params.user_group
-  )
-
-  if 'hdfs-site' in params.config['configurations']:
-    XmlConfig( "hdfs-site.xml",
-            conf_dir = params.hbase_conf_dir,
-            configurations = params.config['configurations']['hdfs-site'],
-            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
-            owner = params.hbase_user,
-            group = params.user_group
-    )
-
-    XmlConfig("hdfs-site.xml",
-            conf_dir=params.hadoop_conf_dir,
-            configurations=params.config['configurations']['hdfs-site'],
-            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
-            owner=params.hdfs_user,
-            group=params.user_group
-    )
-
-  if 'hbase-policy' in params.config['configurations']:
-    XmlConfig( "hbase-policy.xml",
-            conf_dir = params.hbase_conf_dir,
-            configurations = params.config['configurations']['hbase-policy'],
-            configuration_attributes=params.config['configuration_attributes']['hbase-policy'],
-            owner = params.hbase_user,
-            group = params.user_group
-    )
-  # Manually overriding ownership of file installed by hadoop package
-  else: 
-    File( format("{params.hbase_conf_dir}/hbase-policy.xml"),
-      owner = params.hbase_user,
-      group = params.user_group
-    )
-
-  File(format("{hbase_conf_dir}/hbase-env.sh"),
-       owner = params.hbase_user,
-       content=InlineTemplate(params.hbase_env_sh_template)
-  )     
-       
-  hbase_TemplateConfig( params.metric_prop_file_name,
-    tag = 'GANGLIA-MASTER' if name == 'master' else 'GANGLIA-RS'
-  )
-
-  hbase_TemplateConfig( 'regionservers')
-
-  if params.security_enabled:
-    hbase_TemplateConfig( format("hbase_{name}_jaas.conf"))
-  
-  if name != "client":
-    Directory( params.pid_dir,
-      owner = params.hbase_user,
-      recursive = True
-    )
-  
-    Directory (params.log_dir,
-      owner = params.hbase_user,
-      recursive = True
-    )
-
-  if (params.log4j_props != None):
-    File(format("{params.hbase_conf_dir}/log4j.properties"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.hbase_user,
-         content=params.log4j_props
-    )
-  elif (os.path.exists(format("{params.hbase_conf_dir}/log4j.properties"))):
-    File(format("{params.hbase_conf_dir}/log4j.properties"),
-      mode=0644,
-      group=params.user_group,
-      owner=params.hbase_user
-    )
-  if name in ["master","regionserver"]:
-    params.HdfsDirectory(params.hbase_hdfs_root_dir,
-                         action="create_delayed",
-                         owner=params.hbase_user
-    )
-    params.HdfsDirectory(params.hbase_staging_dir,
-                         action="create_delayed",
-                         owner=params.hbase_user,
-                         mode=0711
-    )
-    params.HdfsDirectory(None, action="create")
-
-def hbase_TemplateConfig(name, 
-                         tag=None
-                         ):
-  import params
-
-  TemplateConfig( format("{hbase_conf_dir}/{name}"),
-      owner = params.hbase_user,
-      template_tag = tag
-  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_client.py
deleted file mode 100644
index 043ad11..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_client.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hbase import hbase
-
-         
-class HbaseClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    
-    hbase(name='client')
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-
-if __name__ == "__main__":
-  HbaseClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_decommission.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_decommission.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_decommission.py
deleted file mode 100644
index a623927..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_decommission.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-def hbase_decommission(env):
-  import params
-
-  env.set_params(params)
-  kinit_cmd = params.kinit_cmd
-
-  File(params.region_drainer,
-       content=StaticFile("draining_servers.rb"),
-       mode=0755
-  )
-  
-  if params.hbase_excluded_hosts and params.hbase_excluded_hosts.split(","):
-    hosts = params.hbase_excluded_hosts.split(",")
-  elif params.hbase_included_hosts and params.hbase_included_hosts.split(","):
-    hosts = params.hbase_included_hosts.split(",")
-
-  if params.hbase_drain_only:
-    for host in hosts:
-      if host:
-        regiondrainer_cmd = format(
-          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} remove {host}")
-        Execute(regiondrainer_cmd,
-                user=params.hbase_user,
-                logoutput=True
-        )
-        pass
-    pass
-
-  else:
-    for host in hosts:
-      if host:
-        regiondrainer_cmd = format(
-          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} add {host}")
-        regionmover_cmd = format(
-          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_mover} unload {host}")
-
-        Execute(regiondrainer_cmd,
-                user=params.hbase_user,
-                logoutput=True
-        )
-
-        Execute(regionmover_cmd,
-                user=params.hbase_user,
-                logoutput=True
-        )
-      pass
-    pass
-  pass
-  
-
-  pass
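
With security disabled (empty kinit_cmd) and the pre-2.2 default paths from
params.py elsewhere in this commit, the formatted drain command for a
hypothetical host expands to roughly:

    /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/draining_servers.rb add host1.example.com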

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_master.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_master.py
deleted file mode 100644
index c0e84b4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_master.py
+++ /dev/null
@@ -1,76 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hbase import hbase
-from hbase_service import hbase_service
-from hbase_decommission import hbase_decommission
-import upgrade
-
-         
-class HbaseMaster(Script):
-  def install(self, env):
-    self.install_packages(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hbase(name='master')
-    
-  def pre_rolling_restart(self, env):
-    import params
-    env.set_params(params)
-    upgrade.prestart(env, "hbase-master")
-
-  def start(self, env, rolling_restart=False):
-    import params
-    env.set_params(params)
-    self.configure(env) # for security
-
-    hbase_service( 'master',
-      action = 'start'
-    )
-    
-  def stop(self, env, rolling_restart=False):
-    import params
-    env.set_params(params)
-
-    hbase_service( 'master',
-      action = 'stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{pid_dir}/hbase-{hbase_user}-master.pid")
-    check_process_status(pid_file)
-
-  def decommission(self, env):
-    import params
-    env.set_params(params)
-
-    hbase_decommission(env)
-
-
-if __name__ == "__main__":
-  HbaseMaster().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_regionserver.py
deleted file mode 100644
index ea8e3d4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_regionserver.py
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hbase import hbase
-from hbase_service import hbase_service
-import upgrade
-
-         
-class HbaseRegionServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hbase(name='regionserver')
-      
-  def pre_rolling_restart(self, env):
-    import params
-    env.set_params(params)
-    upgrade.prestart(env, "hbase-regionserver")
-
-  def post_rolling_restart(self, env):
-    import params
-    env.set_params(params)
-    upgrade.post_regionserver(env)
-
-  def start(self, env, rolling_restart=False):
-    import params
-    env.set_params(params)
-    self.configure(env) # for security
-
-    hbase_service( 'regionserver',
-      action = 'start'
-    )
-
-    
-  def stop(self, env, rolling_restart=False):
-    import params
-    env.set_params(params)
-
-    hbase_service( 'regionserver',
-      action = 'stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{pid_dir}/hbase-{hbase_user}-regionserver.pid")
-    check_process_status(pid_file)
-    
-  def decommission(self, env):
-    print "Decommission not yet implemented!"
-    
-
-if __name__ == "__main__":
-  HbaseRegionServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_service.py
deleted file mode 100644
index 99cae6a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase_service.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-def hbase_service(
-  name,
-  action = 'start'): # 'start' or 'stop' or 'status'
-    
-    import params
-  
-    role = name
-    cmd = format("{daemon_script} --config {hbase_conf_dir}")
-    pid_file = format("{pid_dir}/hbase-{hbase_user}-{role}.pid")
-    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
-    
-    if action == 'start':
-      daemon_cmd = format("{cmd} start {role}")
-      
-      Execute ( daemon_cmd,
-        not_if = no_op_test,
-        user = params.hbase_user
-      )
-    elif action == 'stop':
-      daemon_cmd = format("{cmd} stop {role}")
-
-      Execute ( daemon_cmd,
-        user = params.hbase_user,
-        # BUGFIX: hbase regionserver sometimes hangs when nn is in safemode
-        timeout = 30,
-        on_timeout = format("! ( {no_op_test} ) || sudo -H -E kill -9 `cat {pid_file}`"),
-      )
-      
-      Execute (format("rm -f {pid_file}"))

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py
deleted file mode 100644
index 79d47bf..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py
+++ /dev/null
@@ -1,147 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from functions import calc_xmn_from_xms
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-exec_tmp_dir = Script.get_tmp_dir()
-
-version = default("/commandParams/version", None)
-
-hdp_stack_version = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(hdp_stack_version)
-
-#hadoop params
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
-  hadoop_bin_dir = format("/usr/hdp/current/hadoop-client/bin")
-  daemon_script = format('/usr/hdp/current/hbase-client/bin/hbase-daemon.sh')
-  region_mover = format('/usr/hdp/current/hbase-client/bin/region_mover.rb')
-  region_drainer = format('/usr/hdp/current/hbase-client/bin/draining_servers.rb')
-  hbase_cmd = format('/usr/hdp/current/hbase-client/bin/hbase')
-else:
-  hadoop_bin_dir = "/usr/bin"
-  daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
-  region_mover = "/usr/lib/hbase/bin/region_mover.rb"
-  region_drainer = "/usr/lib/hbase/bin/draining_servers.rb"
-  hbase_cmd = "/usr/lib/hbase/bin/hbase"
-
-hadoop_conf_dir = "/etc/hadoop/conf"
-hbase_conf_dir_prefix = "/etc/hbase"
-hbase_conf_dir = format("{hbase_conf_dir_prefix}/conf")
-hbase_excluded_hosts = config['commandParams']['excluded_hosts']
-hbase_drain_only = default("/commandParams/mark_draining_only",False)
-hbase_included_hosts = config['commandParams']['included_hosts']
-
-hbase_user = status_params.hbase_user
-hbase_principal_name = config['configurations']['hbase-env']['hbase_principal_name']
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-_authentication = config['configurations']['core-site']['hadoop.security.authentication']
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-# this is "hadoop-metrics.properties" for 1.x stacks
-metric_prop_file_name = "hadoop-metrics2-hbase.properties"
-
-# not supporting 32 bit jdk.
-java64_home = config['hostLevelParams']['java_home']
-
-log_dir = config['configurations']['hbase-env']['hbase_log_dir']
-master_heapsize = config['configurations']['hbase-env']['hbase_master_heapsize']
-
-regionserver_heapsize = config['configurations']['hbase-env']['hbase_regionserver_heapsize']
-regionserver_xmn_max = config['configurations']['hbase-env']['hbase_regionserver_xmn_max']
-regionserver_xmn_percent = config['configurations']['hbase-env']['hbase_regionserver_xmn_ratio']
-regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, regionserver_xmn_percent, regionserver_xmn_max)
-
-pid_dir = status_params.pid_dir
-tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
-# TODO UPGRADE default, update site during upgrade
-_local_dir_conf = default('/configurations/hbase-site/hbase.local.dir', "${hbase.tmp.dir}/local")
-local_dir = substitute_vars(_local_dir_conf, config['configurations']['hbase-site'])
-
-client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
-master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
-regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
-
-ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
-ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]
-
-ams_collector_hosts = default("/clusterHostInfo/metric_collector_hosts", [])
-has_metric_collector = not len(ams_collector_hosts) == 0
-if has_metric_collector:
-  metric_collector_host = ams_collector_hosts[0]
-
-# if hbase is selected, hbase_rs_hosts should not be empty, but still default just in case
-if 'slave_hosts' in config['clusterHostInfo']:
-  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/slave_hosts') # if hbase_rs_hosts is not given, assume region servers run on the same nodes as slaves
-else:
-  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/all_hosts') 
-
-smoke_test_user = config['configurations']['cluster-env']['smokeuser']
-smokeuser_permissions = "RWXCA"
-service_check_data = functions.get_unique_id_and_date()
-user_group = config['configurations']['cluster-env']["user_group"]
-
-if security_enabled:
-  _hostname_lowercase = config['hostname'].lower()
-  master_jaas_princ = config['configurations']['hbase-site']['hbase.master.kerberos.principal'].replace('_HOST',_hostname_lowercase)
-  regionserver_jaas_princ = config['configurations']['hbase-site']['hbase.regionserver.kerberos.principal'].replace('_HOST',_hostname_lowercase)
-
-master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
-regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-hbase_user_keytab = config['configurations']['hbase-env']['hbase_user_keytab']
-kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-if security_enabled:
-  kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_principal_name};")
-else:
-  kinit_cmd = ""
-
-#log4j.properties
-if (('hbase-log4j' in config['configurations']) and ('content' in config['configurations']['hbase-log4j'])):
-  log4j_props = config['configurations']['hbase-log4j']['content']
-else:
-  log4j_props = None
-  
-hbase_env_sh_template = config['configurations']['hbase-env']['content']
-
-hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']
-hbase_staging_dir = "/apps/hbase/staging"
-#for create_hdfs_directory
-hostname = config["hostname"]
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-import functools
-#create partial functions with common arguments for every HdfsDirectory call
-#to create hdfs directory we need to call params.HdfsDirectory in code
-HdfsDirectory = functools.partial(
-  HdfsDirectory,
-  conf_dir=hadoop_conf_dir,
-  hdfs_user=hdfs_user,
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  bin_dir = hadoop_bin_dir
-)
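
The functools.partial idiom at the end of params.py pre-binds the
cluster-wide arguments once so that call sites (e.g. hbase.py) only pass the
per-directory ones. A self-contained sketch of the same pattern with
hypothetical names:

    import functools

    def hdfs_directory(path, action, owner, conf_dir, hdfs_user):
        print("%s %s as %s (conf=%s, user=%s)" % (action, path, owner, conf_dir, hdfs_user))

    # bind the shared arguments once, as params.py does above
    HdfsDirectory = functools.partial(hdfs_directory,
                                      conf_dir="/etc/hadoop/conf",
                                      hdfs_user="hdfs")

    HdfsDirectory("/apps/hbase/data", action="create_delayed", owner="hbase")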

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/service_check.py
deleted file mode 100644
index 15a306b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/service_check.py
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import functions
-
-
-class HbaseServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    
-    output_file = "/apps/hbase/data/ambarismoketest"
-    test_cmd = format("fs -test -e {output_file}")
-    smokeuser_kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smoke_test_user};") if params.security_enabled else ""
-    hbase_servicecheck_file = format("{exec_tmp_dir}/hbase-smoke.sh")
-  
-    File( format("{exec_tmp_dir}/hbaseSmokeVerify.sh"),
-      content = StaticFile("hbaseSmokeVerify.sh"),
-      mode = 0755
-    )
-  
-    File( hbase_servicecheck_file,
-      mode = 0755,
-      content = Template('hbase-smoke.sh.j2')
-    )
-    
-    if params.security_enabled:    
-      hbase_grant_premissions_file = format("{exec_tmp_dir}/hbase_grant_permissions.sh")
-      grantprivelegecmd = format("{kinit_cmd} {hbase_cmd} shell {hbase_grant_premissions_file}")
-  
-      File( hbase_grant_premissions_file,
-        owner   = params.hbase_user,
-        group   = params.user_group,
-        mode    = 0644,
-        content = Template('hbase_grant_permissions.j2')
-      )
-      
-      Execute( grantprivelegecmd,
-        user = params.hbase_user,
-      )
-
-    servicecheckcmd = format("{smokeuser_kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} shell {hbase_servicecheck_file}")
-    smokeverifycmd = format("{smokeuser_kinit_cmd} {exec_tmp_dir}/hbaseSmokeVerify.sh {hbase_conf_dir} {service_check_data} {hbase_cmd}")
-  
-    Execute( servicecheckcmd,
-      tries     = 3,
-      try_sleep = 5,
-      user = params.smoke_test_user,
-      logoutput = True
-    )
-  
-    Execute ( smokeverifycmd,
-      tries     = 3,
-      try_sleep = 5,
-      user = params.smoke_test_user,
-      logoutput = True
-    )
-    
-if __name__ == "__main__":
-  HbaseServiceCheck().execute()
-  

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/status_params.py
deleted file mode 100644
index 850ec8b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/status_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-pid_dir = config['configurations']['hbase-env']['hbase_pid_dir']
-hbase_user = config['configurations']['hbase-env']['hbase_user']

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/upgrade.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/upgrade.py
deleted file mode 100644
index 6f2e258..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/upgrade.py
+++ /dev/null
@@ -1,49 +0,0 @@
-
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management import *
-from resource_management.core.resources.system import Execute
-from resource_management.core.shell import call
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
-from resource_management.libraries.functions.decorator import retry
-
-def prestart(env, hdp_component):
-  import params
-
-  if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
-    Execute("hdp-select set {0} {1}".format(hdp_component, params.version))
-
-def post_regionserver(env):
-  import params
-  env.set_params(params)
-
-  check_cmd = "echo 'status \"simple\"' | {0} shell".format(params.hbase_cmd)
-
-  exec_cmd = "{0} {1}".format(params.kinit_cmd, check_cmd)
-  call_and_match(exec_cmd, params.hbase_user, params.hostname.lower() + ":")
-
-
-@retry(times=15, sleep_time=2, err_class=Fail)
-def call_and_match(cmd, user, regex):
-
-  code, out = call(cmd, user=user)
-
-  if not (out and re.search(regex, out)):
-    raise Fail("Could not verify RS available")
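
call_and_match above relies on the library's @retry decorator to poll the
region server up to 15 times, two seconds apart. A simplified stand-in
showing what such a decorator does (an assumption-level sketch, not the
resource_management implementation):

    import time
    import functools

    def retry(times, sleep_time, err_class):
        def decorator(fn):
            @functools.wraps(fn)
            def wrapper(*args, **kwargs):
                for attempt in range(times):
                    try:
                        return fn(*args, **kwargs)
                    except err_class:
                        if attempt == times - 1:
                            raise  # exhausted all attempts
                        time.sleep(sleep_time)
            return wrapper
        return decorator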

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
deleted file mode 100644
index b07378b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
+++ /dev/null
@@ -1,108 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See http://wiki.apache.org/hadoop/GangliaMetrics
-#
-# Make sure you know whether you are using ganglia 3.0 or 3.1.
-# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
-# And, yes, this file is named hadoop-metrics.properties rather than
-# hbase-metrics.properties because we're leveraging the hadoop metrics
-# package and hadoop-metrics.properties is a hardcoded name, at least
-# for the moment.
-#
-# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
-
-# HBase-specific configuration to reset long-running stats (e.g. compactions)
-# If this variable is left out, then the default is no expiration.
-hbase.extendedperiod = 3600
-
-{% if has_metric_collector %}
-
-*.sink.timeline*.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
-hbase.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-hbase.period=10
-hbase.collector={{metric_collector_host}}:8188
-
-jvm.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-jvm.period=10
-jvm.collector={{metric_collector_host}}:8188
-
-rpc.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-rpc.period=10
-rpc.collector={{metric_collector_host}}:8188
-
-hbase.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-hbase.sink.timeline.period=10
-hbase.sink.timeline.collector={{metric_collector_host}}:8188
-
-{% else %}
-
-# Configuration of the "hbase" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-hbase.period=10
-hbase.servers={{ganglia_server_host}}:8663
-
-# Configuration of the "jvm" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-jvm.period=10
-jvm.servers={{ganglia_server_host}}:8663
-
-# Configuration of the "rpc" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-rpc.period=10
-rpc.servers={{ganglia_server_host}}:8663
-
-#Ganglia following hadoop example
-hbase.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-hbase.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-hbase.sink.ganglia.servers={{ganglia_server_host}}:8663
-
-{% endif %}
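
The template above switches between the Ambari Metrics (AMS) timeline sink and the Ganglia sinks on a single flag. A hedged sketch of that branching, rendered with jinja2; the hostnames are made up and only two representative properties are kept.

    from jinja2 import Template

    TEMPLATE = (
        "{% if has_metric_collector %}\n"
        "hbase.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink\n"
        "hbase.collector={{ metric_collector_host }}:8188\n"
        "{% else %}\n"
        "hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31\n"
        "hbase.servers={{ ganglia_server_host }}:8663\n"
        "{% endif %}"
    )

    # Flipping has_metric_collector swaps the AMS timeline sink for Ganglia.
    print(Template(TEMPLATE).render(
        has_metric_collector=True,
        metric_collector_host="ams-collector.example.com",
        ganglia_server_host="ganglia.example.com"))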

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
deleted file mode 100644
index d13540f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
+++ /dev/null
@@ -1,102 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See http://wiki.apache.org/hadoop/GangliaMetrics
-#
-# Make sure you know whether you are using ganglia 3.0 or 3.1.
-# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
-# And, yes, this file is named hadoop-metrics.properties rather than
-# hbase-metrics.properties because we're leveraging the hadoop metrics
-# package and hadoop-metrics.properties is a hardcoded name, at least
-# for the moment.
-#
-# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
-
-# HBase-specific configuration to reset long-running stats (e.g. compactions)
-# If this variable is left out, then the default is no expiration.
-hbase.extendedperiod = 3600
-
-{% if has_metric_collector %}
-
-hbase.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-hbase.period=10
-hbase.collector={{metric_collector_host}}:8188
-
-jvm.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-jvm.period=10
-jvm.collector={{metric_collector_host}}:8188
-
-rpc.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-rpc.period=10
-rpc.collector={{metric_collector_host}}:8188
-
-hbase.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-hbase.sink.timeline.period=10
-hbase.sink.timeline.collector={{metric_collector_host}}:8188
-
-{% else %}
-
-# Configuration of the "hbase" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-hbase.period=10
-hbase.servers={{ganglia_server_host}}:8656
-
-# Configuration of the "jvm" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-jvm.period=10
-jvm.servers={{ganglia_server_host}}:8656
-
-# Configuration of the "rpc" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-rpc.period=10
-rpc.servers={{ganglia_server_host}}:8656
-
-#Ganglia following hadoop example
-hbase.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-hbase.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-hbase.sink.ganglia.servers={{ganglia_server_host}}:8656
-
-{% endif %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase-smoke.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase-smoke.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase-smoke.sh.j2
deleted file mode 100644
index 458da95..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase-smoke.sh.j2
+++ /dev/null
@@ -1,44 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-disable 'ambarismoketest'
-drop 'ambarismoketest'
-create 'ambarismoketest','family'
-put 'ambarismoketest','row01','family:col01','{{service_check_data}}'
-scan 'ambarismoketest'
-exit
\ No newline at end of file
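
The smoke script above is plain HBase shell input: it recreates an ambarismoketest table, writes one cell carrying the rendered service_check_data token, and scans it back. A hedged sketch of how such a rendered script is typically driven; the /tmp path and the ambari-qa smoke user here are illustrative stand-ins, not values taken from this commit.

    import subprocess

    # Pipe the rendered smoke script through the HBase shell as the
    # smoke-test user; path and user name are illustrative.
    subprocess.call(["su", "-", "ambari-qa", "-c",
                     "hbase shell /tmp/hbase-smoke.sh"])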

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_client_jaas.conf.j2
deleted file mode 100644
index 38f9721..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_client_jaas.conf.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=false
-useTicketCache=true;
-};
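
This client JAAS stanza relies on the kinit ticket cache (useTicketCache=true, no keytab), unlike the master and regionserver stanzas below, which pin a keytab and principal. A hedged sketch of wiring such a file into an HBase client JVM through the standard java.security.auth.login.config system property; the file path is an illustrative assumption.

    import os
    import subprocess

    env = dict(os.environ)
    env["HBASE_OPTS"] = ("-Djava.security.auth.login.config="
                         "/etc/hbase/conf/hbase_client_jaas.conf")
    # The hbase launcher script appends HBASE_OPTS to the JVM arguments,
    # so the shell picks up the JAAS configuration above.
    subprocess.call(["hbase", "shell"], env=env)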

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_grant_permissions.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_grant_permissions.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_grant_permissions.j2
deleted file mode 100644
index 3378983..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_grant_permissions.j2
+++ /dev/null
@@ -1,39 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-grant '{{smoke_test_user}}', '{{smokeuser_permissions}}'
-exit
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_master_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_master_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_master_jaas.conf.j2
deleted file mode 100644
index a93c36c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_master_jaas.conf.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="{{master_keytab_path}}"
-principal="{{master_jaas_princ}}";
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
deleted file mode 100644
index 7097481..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="{{regionserver_keytab_path}}"
-principal="{{regionserver_jaas_princ}}";
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/regionservers.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/regionservers.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/regionservers.j2
deleted file mode 100644
index fc6cc37..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/regionservers.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in rs_hosts %}{{host}}
-{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py b/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py
index 1a14b6a..7d97245 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py
@@ -25,12 +25,16 @@ import os
 
 
 class TestFlumeHandler(RMFTestCase):
-
+  COMMON_SERVICES_PACKAGE_DIR = "FLUME/1.4.0.2.0/package"
+  STACK_VERSION = "2.0.6"
+  
   def test_configure_default(self):
-    self.executeScript("2.0.6/services/FLUME/package/scripts/flume_handler.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
                        classname = "FlumeHandler",
                        command = "configure",
-                       config_file="default.json")
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assert_configure_default()
     self.assertNoMoreResources()
@@ -44,9 +48,12 @@ class TestFlumeHandler(RMFTestCase):
     os_path_isfile_mock.side_effect = [True, False]
     cmd_target_names_mock.return_value = ["a1"]
 
-    self.executeScript("2.0.6/services/FLUME/package/scripts/flume_handler.py",
-                       classname = "FlumeHandler", command = "start",
-                       config_file="default.json")
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
+                       classname = "FlumeHandler",
+                       command = "start",
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assert_configure_default()
 
@@ -70,10 +77,12 @@ class TestFlumeHandler(RMFTestCase):
   def test_stop_default(self, set_desired_mock, glob_mock):
     glob_mock.side_effect = [['/var/run/flume/a1/pid'], ['/etc/flume/conf/a1/ambari-meta.json']]
 
-    self.executeScript("2.0.6/services/FLUME/package/scripts/flume_handler.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
                        classname = "FlumeHandler",
                        command = "stop",
-                       config_file="default.json")
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertTrue(glob_mock.called)
 
@@ -92,10 +101,12 @@ class TestFlumeHandler(RMFTestCase):
   def test_status_default(self, sys_exit_mock, structured_out_mock):
     
     try:
-      self.executeScript("2.0.6/services/FLUME/package/scripts/flume_handler.py",
+      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
                        classname = "FlumeHandler",
                        command = "status",
-                       config_file="default.json")
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES)
     except:
       # expected since ComponentIsNotRunning gets raised
       pass
@@ -112,10 +123,12 @@ class TestFlumeHandler(RMFTestCase):
     glob_mock.return_value = ['/etc/flume/conf/a1/ambari-meta.json']
 
     try:
-      self.executeScript("2.0.6/services/FLUME/package/scripts/flume_handler.py",
+      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
                        classname = "FlumeHandler",
                        command = "status",
-                       config_file="default.json")
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES)
     except:
       # expected since ComponentIsNotRunning gets raised
       pass
@@ -135,10 +148,12 @@ class TestFlumeHandler(RMFTestCase):
     glob_mock.return_value = []
 
     try:
-      self.executeScript("2.0.6/services/FLUME/package/scripts/flume_handler.py",
+      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
        classname = "FlumeHandler",
        command = "status",
-       config_file="default.json")
+       config_file="default.json",
+       hdp_stack_version = self.STACK_VERSION,
+       target = RMFTestCase.TARGET_COMMON_SERVICES)
     except:
       # expected since ComponentIsNotRunning gets raised
       pass
@@ -230,10 +245,12 @@ class TestFlumeHandler(RMFTestCase):
     # 2nd call is to check if the process is live - that should be False
     os_path_isfile_mock.side_effect = [True, False]
 
-    self.executeScript("2.0.6/services/FLUME/package/scripts/flume_handler.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
                        classname = "FlumeHandler",
                        command = "start",
-                       config_file="flume_target.json")
+                       config_file="flume_target.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assert_configure_many()
 
@@ -258,10 +275,12 @@ class TestFlumeHandler(RMFTestCase):
     # 2nd call is to check if the process is live - that should be False
     os_path_isfile_mock.side_effect = [True, False]
 
-    self.executeScript("2.0.6/services/FLUME/package/scripts/flume_handler.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
                        classname = "FlumeHandler",
                        command = "start",
-                       config_file="flume_target.json")
+                       config_file="flume_target.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assert_configure_many()
 
@@ -282,10 +301,12 @@ class TestFlumeHandler(RMFTestCase):
   def test_stop_single(self, glob_mock):
     glob_mock.return_value = ['/var/run/flume/b1.pid']
 
-    self.executeScript("2.0.6/services/FLUME/package/scripts/flume_handler.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
                        classname = "FlumeHandler",
                        command = "stop",
-                       config_file="flume_target.json")
+                       config_file="flume_target.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertTrue(glob_mock.called)
 
@@ -301,10 +322,12 @@ class TestFlumeHandler(RMFTestCase):
   def test_configure_with_existing(self, os_unlink_mock, expected_names_mock):
     expected_names_mock.return_value = ["x1"]
 
-    self.executeScript("2.0.6/services/FLUME/package/scripts/flume_handler.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
                        classname = "FlumeHandler",
                        command = "configure",
-                       config_file="default.json")
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertTrue(os_unlink_mock.called)
     os_unlink_mock.assert_called_with('/etc/flume/conf/x1/ambari-meta.json')
@@ -314,10 +337,12 @@ class TestFlumeHandler(RMFTestCase):
 
 
   def test_flume_env_not_22(self):
-    self.executeScript("2.0.6/services/FLUME/package/scripts/flume_handler.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
                        classname = "FlumeHandler",
                        command = "configure",
-                       config_file="default.json")
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertResourceCalled('Directory', '/etc/flume/conf', recursive=True)
 
@@ -349,10 +374,12 @@ class TestFlumeHandler(RMFTestCase):
                               content=content)
 
   def test_flume_env_with_22(self):
-    self.executeScript("2.0.6/services/FLUME/package/scripts/flume_handler.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_handler.py",
                        classname = "FlumeHandler",
                        command = "configure",
-                       config_file="flume_22.json")
+                       config_file="flume_22.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertResourceCalled('Directory', '/etc/flume/conf', recursive=True)
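
The mechanical pattern across all of these test diffs is the same: the script path moves from a stack-local copy under stacks/HDP/2.0.6/services to the shared common-services tree, and each executeScript call gains hdp_stack_version and target=RMFTestCase.TARGET_COMMON_SERVICES. A small sketch of the resulting path convention; RESOURCES_ROOT is an assumed root, not a constant from the test harness.

    import os

    RESOURCES_ROOT = "ambari-server/src/main/resources"  # assumed root
    COMMON_SERVICES_PACKAGE_DIR = "FLUME/1.4.0.2.0/package"

    def common_services_script(name):
        # Resolve a service script under the shared common-services tree.
        return os.path.join(RESOURCES_ROOT, "common-services",
                            COMMON_SERVICES_PACKAGE_DIR, "scripts", name)

    print(common_services_script("flume_handler.py"))
    # ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/flume_handler.py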
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_service_check.py
index 68acd37..c862880 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_service_check.py
@@ -22,12 +22,16 @@ from stacks.utils.RMFTestCase import *
 
 
 class TestFlumeCheck(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "FLUME/1.4.0.2.0/package"
+  STACK_VERSION = "2.0.6"
 
   def test_service_check(self):
-    self.executeScript("2.0.6/services/FLUME/package/scripts/flume_check.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/flume_check.py",
                        classname="FlumeServiceCheck",
                        command="service_check",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assertResourceCalled('Execute', 'env JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /usr/bin/flume-ng version',

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_monitor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_monitor.py b/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_monitor.py
index df15dff..67b88ca 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_monitor.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_monitor.py
@@ -22,12 +22,16 @@ from stacks.utils.RMFTestCase import *
 
 
 class TestGangliaMonitor(RMFTestCase):
-
+  COMMON_SERVICES_PACKAGE_DIR = "GANGLIA/3.5.0/package"
+  STACK_VERSION = "2.0.6"
+  
   def test_configure_default(self):
-    self.executeScript("2.0.6/services/GANGLIA/package/scripts/ganglia_monitor.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ganglia_monitor.py",
                        classname="GangliaMonitor",
                        command="configure",
-                       config_file="default.json"
+                       config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
     self.assert_gmond_master_conf_generated()
@@ -35,20 +39,24 @@ class TestGangliaMonitor(RMFTestCase):
 
 
   def test_configure_non_gmetad_node(self):
-    self.executeScript("2.0.6/services/GANGLIA/package/scripts/ganglia_monitor.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ganglia_monitor.py",
                        classname="GangliaMonitor",
                        command="configure",
                        config_file="default.non_gmetad_host.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
     self.assertNoMoreResources()
 
 
   def test_start_default(self):
-    self.executeScript("2.0.6/services/GANGLIA/package/scripts/ganglia_monitor.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ganglia_monitor.py",
                        classname="GangliaMonitor",
                        command="start",
                        config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
     self.assert_gmond_master_conf_generated()
@@ -59,10 +67,12 @@ class TestGangliaMonitor(RMFTestCase):
 
 
   def test_stop_default(self):
-    self.executeScript("2.0.6/services/GANGLIA/package/scripts/ganglia_monitor.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ganglia_monitor.py",
                        classname="GangliaMonitor",
                        command="stop",
                        config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', 'service hdp-gmond stop >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1',
                               path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
@@ -71,10 +81,12 @@ class TestGangliaMonitor(RMFTestCase):
 
 
   def test_install_default(self):
-    self.executeScript("2.0.6/services/GANGLIA/package/scripts/ganglia_monitor.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ganglia_monitor.py",
                        classname="GangliaMonitor",
                        command="install",
                        config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
     self.assert_gmond_master_conf_generated()

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_server.py b/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_server.py
index b8050f4..470fcc7 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_server.py
@@ -22,21 +22,27 @@ from stacks.utils.RMFTestCase import *
 from mock.mock import MagicMock, call, patch
 
 class TestGangliaServer(RMFTestCase):
-
+  COMMON_SERVICES_PACKAGE_DIR = "GANGLIA/3.5.0/package"
+  STACK_VERSION = "2.0.6"
+  
   def test_configure_default(self):
-    self.executeScript("2.0.6/services/GANGLIA/package/scripts/ganglia_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ganglia_server.py",
                      classname="GangliaServer",
                      command="configure",
                      config_file="default.json",
+                     hdp_stack_version = self.STACK_VERSION,
+                     target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
     self.assertNoMoreResources()
 
   def test_start_default(self):
-    self.executeScript("2.0.6/services/GANGLIA/package/scripts/ganglia_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ganglia_server.py",
                        classname="GangliaServer",
                        command="start",
                        config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
     self.assertResourceCalled('Execute', 'service hdp-gmetad start >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1',
@@ -47,10 +53,12 @@ class TestGangliaServer(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_stop_default(self):
-    self.executeScript("2.0.6/services/GANGLIA/package/scripts/ganglia_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ganglia_server.py",
                        classname="GangliaServer",
                        command="stop",
                        config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', 'service hdp-gmetad stop >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1',
                               path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
@@ -60,10 +68,12 @@ class TestGangliaServer(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_install_default(self):
-    self.executeScript("2.0.6/services/GANGLIA/package/scripts/ganglia_server.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/ganglia_server.py",
                        classname="GangliaServer",
                        command="install",
                        config_file="default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
index e1d52c5..9124d64 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
@@ -22,12 +22,16 @@ from stacks.utils.RMFTestCase import *
 
 @patch("os.path.exists", new = MagicMock(return_value=True))
 class TestHBaseClient(RMFTestCase):
-  
+  COMMON_SERVICES_PACKAGE_DIR = "HBASE/0.96.0.2.0/package"
+  STACK_VERSION = "2.0.6"
+
   def test_configure_secured(self):
-    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_client.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_client.py",
                    classname = "HbaseClient",
                    command = "configure",
-                   config_file="secured.json"
+                   config_file="secured.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
     self.assertResourceCalled('Directory', '/etc/hbase',
@@ -107,10 +111,12 @@ class TestHBaseClient(RMFTestCase):
     self.assertNoMoreResources()
     
   def test_configure_default(self):
-    self.executeScript("2.0.6/services/HBASE/package/scripts/hbase_client.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_client.py",
                    classname = "HbaseClient",
                    command = "configure",
-                   config_file="default.json"
+                   config_file="default.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/etc/hbase',
       mode = 0755


[28/37] ambari git commit: AMBARI-8746: Common Services: Refactor HDP 2.0.6 HIVE, OOZIE services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/etc/hive-schema-0.12.0.oracle.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/etc/hive-schema-0.12.0.oracle.sql b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/etc/hive-schema-0.12.0.oracle.sql
deleted file mode 100644
index 812b897..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/etc/hive-schema-0.12.0.oracle.sql
+++ /dev/null
@@ -1,718 +0,0 @@
--- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
--- NOTE: Some versions of SchemaTool do not automatically generate this table.
--- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
-CREATE TABLE SEQUENCE_TABLE
-(
-   SEQUENCE_NAME VARCHAR2(255) NOT NULL,
-   NEXT_VAL NUMBER NOT NULL
-);
-
-ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME);
-
--- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
--- This table is required if datanucleus.autoStartMechanism=SchemaTable
--- NOTE: Some versions of SchemaTool do not automatically generate this table.
--- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
-CREATE TABLE NUCLEUS_TABLES
-(
-   CLASS_NAME VARCHAR2(128) NOT NULL,
-   TABLE_NAME VARCHAR2(128) NOT NULL,
-   TYPE VARCHAR2(4) NOT NULL,
-   OWNER VARCHAR2(2) NOT NULL,
-   VERSION VARCHAR2(20) NOT NULL,
-   INTERFACE_NAME VARCHAR2(255) NULL
-);
-
-ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME);
-
--- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
-CREATE TABLE PART_COL_PRIVS
-(
-    PART_COLUMN_GRANT_ID NUMBER NOT NULL,
-    "COLUMN_NAME" VARCHAR2(128) NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PART_ID NUMBER NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    PART_COL_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
-
--- Table CDS.
-CREATE TABLE CDS
-(
-    CD_ID NUMBER NOT NULL
-);
-
-ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
-
--- Table COLUMNS_V2 for join relationship
-CREATE TABLE COLUMNS_V2
-(
-    CD_ID NUMBER NOT NULL,
-    "COMMENT" VARCHAR2(256) NULL,
-    "COLUMN_NAME" VARCHAR2(128) NOT NULL,
-    TYPE_NAME VARCHAR2(4000) NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
-
--- Table PARTITION_KEY_VALS for join relationship
-CREATE TABLE PARTITION_KEY_VALS
-(
-    PART_ID NUMBER NOT NULL,
-    PART_KEY_VAL VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
-
--- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
-CREATE TABLE DBS
-(
-    DB_ID NUMBER NOT NULL,
-    "DESC" VARCHAR2(4000) NULL,
-    DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
-    "NAME" VARCHAR2(128) NULL
-);
-
-ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
-
--- Table PARTITION_PARAMS for join relationship
-CREATE TABLE PARTITION_PARAMS
-(
-    PART_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
-
--- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
-CREATE TABLE SERDES
-(
-    SERDE_ID NUMBER NOT NULL,
-    "NAME" VARCHAR2(128) NULL,
-    SLIB VARCHAR2(4000) NULL
-);
-
-ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
-
--- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
-CREATE TABLE TYPES
-(
-    TYPES_ID NUMBER NOT NULL,
-    TYPE_NAME VARCHAR2(128) NULL,
-    TYPE1 VARCHAR2(767) NULL,
-    TYPE2 VARCHAR2(767) NULL
-);
-
-ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
-
--- Table PARTITION_KEYS for join relationship
-CREATE TABLE PARTITION_KEYS
-(
-    TBL_ID NUMBER NOT NULL,
-    PKEY_COMMENT VARCHAR2(4000) NULL,
-    PKEY_NAME VARCHAR2(128) NOT NULL,
-    PKEY_TYPE VARCHAR2(767) NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
-
--- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
-CREATE TABLE ROLES
-(
-    ROLE_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    OWNER_NAME VARCHAR2(128) NULL,
-    ROLE_NAME VARCHAR2(128) NULL
-);
-
-ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
-
--- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
-CREATE TABLE PARTITIONS
-(
-    PART_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
-    PART_NAME VARCHAR2(767) NULL,
-    SD_ID NUMBER NULL,
-    TBL_ID NUMBER NULL
-);
-
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
-
--- Table INDEX_PARAMS for join relationship
-CREATE TABLE INDEX_PARAMS
-(
-    INDEX_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
-
--- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
-CREATE TABLE TBL_COL_PRIVS
-(
-    TBL_COLUMN_GRANT_ID NUMBER NOT NULL,
-    "COLUMN_NAME" VARCHAR2(128) NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    TBL_COL_PRIV VARCHAR2(128) NULL,
-    TBL_ID NUMBER NULL
-);
-
-ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
-
--- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
-CREATE TABLE IDXS
-(
-    INDEX_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    DEFERRED_REBUILD NUMBER(1) NOT NULL CHECK (DEFERRED_REBUILD IN (1,0)),
-    INDEX_HANDLER_CLASS VARCHAR2(4000) NULL,
-    INDEX_NAME VARCHAR2(128) NULL,
-    INDEX_TBL_ID NUMBER NULL,
-    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
-    ORIG_TBL_ID NUMBER NULL,
-    SD_ID NUMBER NULL
-);
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
-
--- Table BUCKETING_COLS for join relationship
-CREATE TABLE BUCKETING_COLS
-(
-    SD_ID NUMBER NOT NULL,
-    BUCKET_COL_NAME VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
--- Table TYPE_FIELDS for join relationship
-CREATE TABLE TYPE_FIELDS
-(
-    TYPE_NAME NUMBER NOT NULL,
-    "COMMENT" VARCHAR2(256) NULL,
-    FIELD_NAME VARCHAR2(128) NOT NULL,
-    FIELD_TYPE VARCHAR2(767) NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
-
--- Table SD_PARAMS for join relationship
-CREATE TABLE SD_PARAMS
-(
-    SD_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
-
--- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
-CREATE TABLE GLOBAL_PRIVS
-(
-    USER_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    USER_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
-
--- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
-CREATE TABLE SDS
-(
-    SD_ID NUMBER NOT NULL,
-    CD_ID NUMBER NULL,
-    INPUT_FORMAT VARCHAR2(4000) NULL,
-    IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)),
-    LOCATION VARCHAR2(4000) NULL,
-    NUM_BUCKETS NUMBER (10) NOT NULL,
-    OUTPUT_FORMAT VARCHAR2(4000) NULL,
-    SERDE_ID NUMBER NULL,
-    IS_STOREDASSUBDIRECTORIES NUMBER(1) NOT NULL CHECK (IS_STOREDASSUBDIRECTORIES IN (1,0))
-);
-
-ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
-
--- Table TABLE_PARAMS for join relationship
-CREATE TABLE TABLE_PARAMS
-(
-    TBL_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
-
--- Table SORT_COLS for join relationship
-CREATE TABLE SORT_COLS
-(
-    SD_ID NUMBER NOT NULL,
-    "COLUMN_NAME" VARCHAR2(128) NULL,
-    "ORDER" NUMBER (10) NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
--- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
-CREATE TABLE TBL_PRIVS
-(
-    TBL_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    TBL_PRIV VARCHAR2(128) NULL,
-    TBL_ID NUMBER NULL
-);
-
-ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
-
--- Table DATABASE_PARAMS for join relationship
-CREATE TABLE DATABASE_PARAMS
-(
-    DB_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(180) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
-
--- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
-CREATE TABLE ROLE_MAP
-(
-    ROLE_GRANT_ID NUMBER NOT NULL,
-    ADD_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    ROLE_ID NUMBER NULL
-);
-
-ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
-
--- Table SERDE_PARAMS for join relationship
-CREATE TABLE SERDE_PARAMS
-(
-    SERDE_ID NUMBER NOT NULL,
-    PARAM_KEY VARCHAR2(256) NOT NULL,
-    PARAM_VALUE VARCHAR2(4000) NULL
-);
-
-ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
-
--- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
-CREATE TABLE PART_PRIVS
-(
-    PART_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PART_ID NUMBER NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    PART_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
-
--- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
-CREATE TABLE DB_PRIVS
-(
-    DB_GRANT_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    DB_ID NUMBER NULL,
-    GRANT_OPTION NUMBER (5) NOT NULL,
-    GRANTOR VARCHAR2(128) NULL,
-    GRANTOR_TYPE VARCHAR2(128) NULL,
-    PRINCIPAL_NAME VARCHAR2(128) NULL,
-    PRINCIPAL_TYPE VARCHAR2(128) NULL,
-    DB_PRIV VARCHAR2(128) NULL
-);
-
-ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
-
--- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
-CREATE TABLE TBLS
-(
-    TBL_ID NUMBER NOT NULL,
-    CREATE_TIME NUMBER (10) NOT NULL,
-    DB_ID NUMBER NULL,
-    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
-    OWNER VARCHAR2(767) NULL,
-    RETENTION NUMBER (10) NOT NULL,
-    SD_ID NUMBER NULL,
-    TBL_NAME VARCHAR2(128) NULL,
-    TBL_TYPE VARCHAR2(128) NULL,
-    VIEW_EXPANDED_TEXT CLOB NULL,
-    VIEW_ORIGINAL_TEXT CLOB NULL
-);
-
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
-
--- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
-CREATE TABLE PARTITION_EVENTS
-(
-    PART_NAME_ID NUMBER NOT NULL,
-    DB_NAME VARCHAR2(128) NULL,
-    EVENT_TIME NUMBER NOT NULL,
-    EVENT_TYPE NUMBER (10) NOT NULL,
-    PARTITION_NAME VARCHAR2(767) NULL,
-    TBL_NAME VARCHAR2(128) NULL
-);
-
-ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
-
--- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
-CREATE TABLE SKEWED_STRING_LIST
-(
-    STRING_LIST_ID NUMBER NOT NULL
-);
-
-ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
-
-CREATE TABLE SKEWED_STRING_LIST_VALUES
-(
-    STRING_LIST_ID NUMBER NOT NULL,
-    "STRING_LIST_VALUE" VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
-
-ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
-
-CREATE TABLE SKEWED_COL_NAMES
-(
-    SD_ID NUMBER NOT NULL,
-    "SKEWED_COL_NAME" VARCHAR2(256) NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
-
-ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
-(
-    SD_ID NUMBER NOT NULL,
-    STRING_LIST_ID_KID NUMBER NOT NULL,
-    "LOCATION" VARCHAR2(4000) NULL
-);
-
-CREATE TABLE MASTER_KEYS
-(
-    KEY_ID NUMBER (10) NOT NULL,
-    MASTER_KEY VARCHAR2(767) NULL
-);
-
-CREATE TABLE DELEGATION_TOKENS
-(
-    TOKEN_IDENT VARCHAR2(767) NOT NULL,
-    TOKEN VARCHAR2(767) NULL
-);
-
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
-
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE TABLE SKEWED_VALUES
-(
-    SD_ID_OID NUMBER NOT NULL,
-    STRING_LIST_ID_EID NUMBER NOT NULL,
-    INTEGER_IDX NUMBER(10) NOT NULL
-);
-
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
-
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
--- column statistics
-
-CREATE TABLE TAB_COL_STATS (
- CS_ID NUMBER NOT NULL,
- DB_NAME VARCHAR2(128) NOT NULL,
- TABLE_NAME VARCHAR2(128) NOT NULL, 
- COLUMN_NAME VARCHAR2(128) NOT NULL,
- COLUMN_TYPE VARCHAR2(128) NOT NULL,
- TBL_ID NUMBER NOT NULL,
- LONG_LOW_VALUE NUMBER,
- LONG_HIGH_VALUE NUMBER,
- DOUBLE_LOW_VALUE NUMBER,
- DOUBLE_HIGH_VALUE NUMBER,
- BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
- BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
- NUM_NULLS NUMBER NOT NULL,
- NUM_DISTINCTS NUMBER,
- AVG_COL_LEN NUMBER,
- MAX_COL_LEN NUMBER,
- NUM_TRUES NUMBER,
- NUM_FALSES NUMBER,
- LAST_ANALYZED NUMBER NOT NULL
-);
-
-CREATE TABLE VERSION (
-  VER_ID NUMBER NOT NULL,
-  SCHEMA_VERSION VARCHAR(127) NOT NULL,
-  VERSION_COMMENT VARCHAR(255)
-);
-ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
-
-ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
-
-ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID);
-
-CREATE TABLE PART_COL_STATS (
- CS_ID NUMBER NOT NULL,
- DB_NAME VARCHAR2(128) NOT NULL,
- TABLE_NAME VARCHAR2(128) NOT NULL,
- PARTITION_NAME VARCHAR2(767) NOT NULL,
- COLUMN_NAME VARCHAR2(128) NOT NULL,
- COLUMN_TYPE VARCHAR2(128) NOT NULL,
- PART_ID NUMBER NOT NULL,
- LONG_LOW_VALUE NUMBER,
- LONG_HIGH_VALUE NUMBER,
- DOUBLE_LOW_VALUE NUMBER,
- DOUBLE_HIGH_VALUE NUMBER,
- BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
- BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
- NUM_NULLS NUMBER NOT NULL,
- NUM_DISTINCTS NUMBER,
- AVG_COL_LEN NUMBER,
- MAX_COL_LEN NUMBER,
- NUM_TRUES NUMBER,
- NUM_FALSES NUMBER,
- LAST_ANALYZED NUMBER NOT NULL
-);
-
-ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
-
-ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
-
-CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
-
--- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
-ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
-
-CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table COLUMNS_V2
-ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
-
-
--- Constraints for table PARTITION_KEY_VALS
-ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
-
-
--- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
-CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME");
-
-
--- Constraints for table PARTITION_PARAMS
-ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
-
-
--- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
-
--- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
-CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME);
-
-
--- Constraints for table PARTITION_KEYS
-ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
-
-
--- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
-CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
-
-
--- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
-
-CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
-
-CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
-
-
--- Constraints for table INDEX_PARAMS
-ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
-
-
--- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
-ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
-
-
--- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
-
-CREATE INDEX IDXS_N50 ON IDXS (INDEX_TBL_ID);
-
-CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
-
-CREATE INDEX IDXS_N49 ON IDXS (ORIG_TBL_ID);
-
-
--- Constraints for table BUCKETING_COLS
-ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
-
-
--- Constraints for table TYPE_FIELDS
-ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
-
-
--- Constraints for table SD_PARAMS
-ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
-
-
--- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
-CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
-ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
-ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
-CREATE INDEX SDS_N50 ON SDS (CD_ID);
-
-
--- Constraints for table TABLE_PARAMS
-ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
-
-
--- Constraints for table SORT_COLS
-ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
-
-
--- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
-ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
-
-CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table DATABASE_PARAMS
-ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
-
-
--- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
-ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
-
-CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
-
-
--- Constraints for table SERDE_PARAMS
-ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
-
-
--- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
-ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
-
-
--- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
-ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
-
-CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
-
-CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
-
-
--- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
-
-ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
-
-CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
-
-CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
-
-CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
-
-
--- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
-CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
-
-INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.12.0', 'Hive release version 0.12.0');
-
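
For context on the schema just removed: every foreign key in the Oracle
script is declared INITIALLY DEFERRED, which makes the constraint check
run at COMMIT rather than per statement, presumably so the metastore's
ORM can insert parent and child rows in any order within one transaction.
A minimal sketch of the pattern, using hypothetical PARENT_T and CHILD_T
tables that are not part of the metastore schema:

    CREATE TABLE PARENT_T (ID NUMBER PRIMARY KEY);
    CREATE TABLE CHILD_T
    (
        ID NUMBER PRIMARY KEY,
        PARENT_ID NUMBER NOT NULL
    );
    -- Deferred FK: validated at COMMIT, not at INSERT time
    ALTER TABLE CHILD_T ADD CONSTRAINT CHILD_T_FK1 FOREIGN KEY (PARENT_ID)
        REFERENCES PARENT_T (ID) INITIALLY DEFERRED;

    INSERT INTO CHILD_T (ID, PARENT_ID) VALUES (1, 100); -- parent not yet present
    INSERT INTO PARENT_T (ID) VALUES (100);              -- parent arrives later
    COMMIT;                                              -- FK checked here; succeeds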

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/etc/hive-schema-0.12.0.postgres.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/etc/hive-schema-0.12.0.postgres.sql b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/etc/hive-schema-0.12.0.postgres.sql
deleted file mode 100644
index bc6486b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/etc/hive-schema-0.12.0.postgres.sql
+++ /dev/null
@@ -1,1406 +0,0 @@
---
--- PostgreSQL database dump
---
-
-SET statement_timeout = 0;
-SET client_encoding = 'UTF8';
-SET standard_conforming_strings = off;
-SET check_function_bodies = false;
-SET client_min_messages = warning;
-SET escape_string_warning = off;
-
-SET search_path = public, pg_catalog;
-
-SET default_tablespace = '';
-
-SET default_with_oids = false;
-
---
--- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "BUCKETING_COLS" (
-    "SD_ID" bigint NOT NULL,
-    "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "CDS" (
-    "CD_ID" bigint NOT NULL
-);
-
-
---
--- Name: COLUMNS_OLD; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "COLUMNS_OLD" (
-    "SD_ID" bigint NOT NULL,
-    "COMMENT" character varying(256) DEFAULT NULL::character varying,
-    "COLUMN_NAME" character varying(128) NOT NULL,
-    "TYPE_NAME" character varying(4000) NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "COLUMNS_V2" (
-    "CD_ID" bigint NOT NULL,
-    "COMMENT" character varying(4000),
-    "COLUMN_NAME" character varying(128) NOT NULL,
-    "TYPE_NAME" character varying(4000),
-    "INTEGER_IDX" integer NOT NULL
-);
-
-
---
--- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "DATABASE_PARAMS" (
-    "DB_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(180) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "DBS" (
-    "DB_ID" bigint NOT NULL,
-    "DESC" character varying(4000) DEFAULT NULL::character varying,
-    "DB_LOCATION_URI" character varying(4000) NOT NULL,
-    "NAME" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "DB_PRIVS" (
-    "DB_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "DB_ID" bigint,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "DB_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "GLOBAL_PRIVS" (
-    "USER_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "USER_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "IDXS" (
-    "INDEX_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "DEFERRED_REBUILD" boolean NOT NULL,
-    "INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character varying,
-    "INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
-    "INDEX_TBL_ID" bigint,
-    "LAST_ACCESS_TIME" bigint NOT NULL,
-    "ORIG_TBL_ID" bigint,
-    "SD_ID" bigint
-);
-
-
---
--- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "INDEX_PARAMS" (
-    "INDEX_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "NUCLEUS_TABLES" (
-    "CLASS_NAME" character varying(128) NOT NULL,
-    "TABLE_NAME" character varying(128) NOT NULL,
-    "TYPE" character varying(4) NOT NULL,
-    "OWNER" character varying(2) NOT NULL,
-    "VERSION" character varying(20) NOT NULL,
-    "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying
-);
-
-
---
--- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITIONS" (
-    "PART_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "LAST_ACCESS_TIME" bigint NOT NULL,
-    "PART_NAME" character varying(767) DEFAULT NULL::character varying,
-    "SD_ID" bigint,
-    "TBL_ID" bigint
-);
-
-
---
--- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_EVENTS" (
-    "PART_NAME_ID" bigint NOT NULL,
-    "DB_NAME" character varying(128),
-    "EVENT_TIME" bigint NOT NULL,
-    "EVENT_TYPE" integer NOT NULL,
-    "PARTITION_NAME" character varying(767),
-    "TBL_NAME" character varying(128)
-);
-
-
---
--- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_KEYS" (
-    "TBL_ID" bigint NOT NULL,
-    "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying,
-    "PKEY_NAME" character varying(128) NOT NULL,
-    "PKEY_TYPE" character varying(767) NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_KEY_VALS" (
-    "PART_ID" bigint NOT NULL,
-    "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PARTITION_PARAMS" (
-    "PART_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PART_COL_PRIVS" (
-    "PART_COLUMN_GRANT_ID" bigint NOT NULL,
-    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_ID" bigint,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PART_PRIVS" (
-    "PART_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_ID" bigint,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PART_PRIV" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "ROLES" (
-    "ROLE_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
-    "ROLE_NAME" character varying(128) DEFAULT NULL::character varying
-);
-
-
---
--- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "ROLE_MAP" (
-    "ROLE_GRANT_ID" bigint NOT NULL,
-    "ADD_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "ROLE_ID" bigint
-);
-
-
---
--- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SDS" (
-    "SD_ID" bigint NOT NULL,
-    "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
-    "IS_COMPRESSED" boolean NOT NULL,
-    "LOCATION" character varying(4000) DEFAULT NULL::character varying,
-    "NUM_BUCKETS" bigint NOT NULL,
-    "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
-    "SERDE_ID" bigint,
-    "CD_ID" bigint,
-    "IS_STOREDASSUBDIRECTORIES" boolean NOT NULL
-);
-
-
---
--- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SD_PARAMS" (
-    "SD_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SEQUENCE_TABLE" (
-    "SEQUENCE_NAME" character varying(255) NOT NULL,
-    "NEXT_VAL" bigint NOT NULL
-);
-
-
---
--- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SERDES" (
-    "SERDE_ID" bigint NOT NULL,
-    "NAME" character varying(128) DEFAULT NULL::character varying,
-    "SLIB" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SERDE_PARAMS" (
-    "SERDE_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "SORT_COLS" (
-    "SD_ID" bigint NOT NULL,
-    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
-    "ORDER" bigint NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TABLE_PARAMS" (
-    "TBL_ID" bigint NOT NULL,
-    "PARAM_KEY" character varying(256) NOT NULL,
-    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
-);
-
-
---
--- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TBLS" (
-    "TBL_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "DB_ID" bigint,
-    "LAST_ACCESS_TIME" bigint NOT NULL,
-    "OWNER" character varying(767) DEFAULT NULL::character varying,
-    "RETENTION" bigint NOT NULL,
-    "SD_ID" bigint,
-    "TBL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "VIEW_EXPANDED_TEXT" text,
-    "VIEW_ORIGINAL_TEXT" text
-);
-
-
---
--- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TBL_COL_PRIVS" (
-    "TBL_COLUMN_GRANT_ID" bigint NOT NULL,
-    "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
-    "TBL_ID" bigint
-);
-
-
---
--- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TBL_PRIVS" (
-    "TBL_GRANT_ID" bigint NOT NULL,
-    "CREATE_TIME" bigint NOT NULL,
-    "GRANT_OPTION" smallint NOT NULL,
-    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
-    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
-    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
-    "TBL_PRIV" character varying(128) DEFAULT NULL::character varying,
-    "TBL_ID" bigint
-);
-
-
---
--- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TYPES" (
-    "TYPES_ID" bigint NOT NULL,
-    "TYPE_NAME" character varying(128) DEFAULT NULL::character varying,
-    "TYPE1" character varying(767) DEFAULT NULL::character varying,
-    "TYPE2" character varying(767) DEFAULT NULL::character varying
-);
-
-
---
--- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "TYPE_FIELDS" (
-    "TYPE_NAME" bigint NOT NULL,
-    "COMMENT" character varying(256) DEFAULT NULL::character varying,
-    "FIELD_NAME" character varying(128) NOT NULL,
-    "FIELD_TYPE" character varying(767) NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_STRING_LIST" (
-    "STRING_LIST_ID" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_STRING_LIST_VALUES" (
-    "STRING_LIST_ID" bigint NOT NULL,
-    "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_COL_NAMES" (
-    "SD_ID" bigint NOT NULL,
-    "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" (
-    "SD_ID" bigint NOT NULL,
-    "STRING_LIST_ID_KID" bigint NOT NULL,
-    "LOCATION" character varying(4000) DEFAULT NULL::character varying
-);
-
-CREATE TABLE "SKEWED_VALUES" (
-    "SD_ID_OID" bigint NOT NULL,
-    "STRING_LIST_ID_EID" bigint NOT NULL,
-    "INTEGER_IDX" bigint NOT NULL
-);
-
-
---
--- Name: MASTER_KEYS, DELEGATION_TOKENS, TAB_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE  "MASTER_KEYS"
-(
-    "KEY_ID" SERIAL,
-    "MASTER_KEY" varchar(767) NULL,
-    PRIMARY KEY ("KEY_ID")
-);
-
-CREATE TABLE  "DELEGATION_TOKENS"
-(
-    "TOKEN_IDENT" varchar(767) NOT NULL,
-    "TOKEN" varchar(767) NULL,
-    PRIMARY KEY ("TOKEN_IDENT")
-);
-
-CREATE TABLE "TAB_COL_STATS" (
- "CS_ID" bigint NOT NULL,
- "DB_NAME" character varying(128) DEFAULT NULL::character varying,
- "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
- "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
- "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
- "TBL_ID" bigint NOT NULL,
- "LONG_LOW_VALUE" bigint,
- "LONG_HIGH_VALUE" bigint,
- "DOUBLE_LOW_VALUE" double precision,
- "DOUBLE_HIGH_VALUE" double precision,
- "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "NUM_NULLS" bigint NOT NULL,
- "NUM_DISTINCTS" bigint,
- "AVG_COL_LEN" double precision,
- "MAX_COL_LEN" bigint,
- "NUM_TRUES" bigint,
- "NUM_FALSES" bigint,
- "LAST_ANALYZED" bigint NOT NULL
-);
-
---
--- Table structure for VERSION
---
-CREATE TABLE "VERSION" (
-  "VER_ID" bigint,
-  "SCHEMA_VERSION" character varying(127) NOT NULL,
-  "VERSION_COMMENT" character varying(255) NOT NULL
-);
-
---
--- Name: PART_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE TABLE "PART_COL_STATS" (
- "CS_ID" bigint NOT NULL,
- "DB_NAME" character varying(128) DEFAULT NULL::character varying,
- "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
- "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
- "COLUMN_NAME" character varying(128) DEFAULT NULL::character varying,
- "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
- "PART_ID" bigint NOT NULL,
- "LONG_LOW_VALUE" bigint,
- "LONG_HIGH_VALUE" bigint,
- "DOUBLE_LOW_VALUE" double precision,
- "DOUBLE_HIGH_VALUE" double precision,
- "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
- "NUM_NULLS" bigint NOT NULL,
- "NUM_DISTINCTS" bigint,
- "AVG_COL_LEN" double precision,
- "MAX_COL_LEN" bigint,
- "NUM_TRUES" bigint,
- "NUM_FALSES" bigint,
- "LAST_ANALYZED" bigint NOT NULL
-);
-
---
--- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "BUCKETING_COLS"
-    ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-
---
--- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "CDS"
-    ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID");
-
-
---
--- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "COLUMNS_V2"
-    ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
-
-
---
--- Name: COLUMNS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "COLUMNS_OLD"
-    ADD CONSTRAINT "COLUMNS_pkey" PRIMARY KEY ("SD_ID", "COLUMN_NAME");
-
-
---
--- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DATABASE_PARAMS"
-    ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY");
-
-
---
--- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DB_PRIVS"
-    ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DBS"
-    ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID");
-
-
---
--- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DB_PRIVS"
-    ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID");
-
-
---
--- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "GLOBAL_PRIVS"
-    ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "GLOBAL_PRIVS"
-    ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID");
-
-
---
--- Name: IDXS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_pkey" PRIMARY KEY ("INDEX_ID");
-
-
---
--- Name: INDEX_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "INDEX_PARAMS"
-    ADD CONSTRAINT "INDEX_PARAMS_pkey" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
-
-
---
--- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "NUCLEUS_TABLES"
-    ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME");
-
-
---
--- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID");
-
-
---
--- Name: PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_EVENTS"
-    ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID");
-
-
---
--- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_KEYS"
-    ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
-
-
---
--- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_KEY_VALS"
-    ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
-
-
---
--- Name: PARTITION_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITION_PARAMS"
-    ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY");
-
-
---
--- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PART_COL_PRIVS"
-    ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
-
-
---
--- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PART_PRIVS"
-    ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID");
-
-
---
--- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLES"
-    ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME");
-
-
---
--- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLES"
-    ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID");
-
-
---
--- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLE_MAP"
-    ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY ("ROLE_GRANT_ID");
-
-
---
--- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SDS"
-    ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID");
-
-
---
--- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SD_PARAMS"
-    ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY");
-
-
---
--- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SEQUENCE_TABLE"
-    ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME");
-
-
---
--- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SERDES"
-    ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID");
-
-
---
--- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SERDE_PARAMS"
-    ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
-
-
---
--- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "SORT_COLS"
-    ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-
---
--- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TABLE_PARAMS"
-    ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
-
-
---
--- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID");
-
-
---
--- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBL_COL_PRIVS"
-    ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
-
-
---
--- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBL_PRIVS"
-    ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID");
-
-
---
--- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TYPES"
-    ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID");
-
-
---
--- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TYPE_FIELDS"
-    ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
-
-ALTER TABLE ONLY "SKEWED_STRING_LIST"
-    ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID");
-
-ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
-    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
-
-
-ALTER TABLE ONLY "SKEWED_COL_NAMES"
-    ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
-
-ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
-    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
-
-ALTER TABLE ONLY "SKEWED_VALUES"
-    ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
-
---
--- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID");
-
---
--- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID");
-
---
--- Name: UNIQUEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "UNIQUEINDEX" UNIQUE ("INDEX_NAME", "ORIG_TBL_ID");
-
-
---
--- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID");
-
-
---
--- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID");
-
-
---
--- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "DBS"
-    ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME");
-
-
---
--- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "TYPES"
-    ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME");
-
-
---
--- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
---
-
-ALTER TABLE ONLY "ROLE_MAP"
-    ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID");
-
-
---
--- Name: COLUMNS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "COLUMNS_N49" ON "COLUMNS_OLD" USING btree ("SD_ID");
-
-
---
--- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID");
-
-
---
--- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID");
-
-
---
--- Name: IDXS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "IDXS_N49" ON "IDXS" USING btree ("ORIG_TBL_ID");
-
-
---
--- Name: IDXS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "IDXS_N50" ON "IDXS" USING btree ("INDEX_TBL_ID");
-
-
---
--- Name: IDXS_N51; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "IDXS_N51" ON "IDXS" USING btree ("SD_ID");
-
-
---
--- Name: INDEX_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "INDEX_PARAMS_N49" ON "INDEX_PARAMS" USING btree ("INDEX_ID");
-
-
---
--- Name: PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME");
-
-
---
--- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID");
-
-
---
--- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID");
-
-
---
--- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID");
-
-
---
--- Name: PARTITION_KEY_VALS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID");
-
-
---
--- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID");
-
-
---
--- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID");
-
-
---
--- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID");
-
-
---
--- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID");
-
-
---
--- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID");
-
-
---
--- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID");
-
-
---
--- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID");
-
-
---
--- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID");
-
-
---
--- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
-
-
---
--- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID");
-
-
---
--- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID");
-
-
---
--- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID");
-
-
---
--- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID");
-
-
---
--- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID");
-
-
---
--- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME");
-
---
--- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID");
-
---
--- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
-
-
-ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
-    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
-
-
-ALTER TABLE ONLY "SKEWED_COL_NAMES"
-    ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
-ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
-    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
-    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
-
-ALTER TABLE ONLY "SKEWED_VALUES"
-    ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
-
-ALTER TABLE ONLY "SKEWED_VALUES"
-    ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "BUCKETING_COLS"
-    ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: COLUMNS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "COLUMNS_OLD"
-    ADD CONSTRAINT "COLUMNS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "COLUMNS_V2"
-    ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
-
-
---
--- Name: DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "DATABASE_PARAMS"
-    ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
-
-
---
--- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "DB_PRIVS"
-    ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
-
-
---
--- Name: IDXS_INDEX_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_INDEX_TBL_ID_fkey" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: IDXS_ORIG_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_ORIG_TBL_ID_fkey" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: IDXS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "IDXS"
-    ADD CONSTRAINT "IDXS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: INDEX_PARAMS_INDEX_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "INDEX_PARAMS"
-    ADD CONSTRAINT "INDEX_PARAMS_INDEX_ID_fkey" FOREIGN KEY ("INDEX_ID") REFERENCES "IDXS"("INDEX_ID") DEFERRABLE;
-
-
---
--- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITIONS"
-    ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITION_KEYS"
-    ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITION_KEY_VALS"
-    ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PARTITION_PARAMS"
-    ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PART_COL_PRIVS"
-    ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "PART_PRIVS"
-    ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
---
--- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "ROLE_MAP"
-    ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE;
-
-
---
--- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SDS"
-    ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
-
-
---
--- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SDS"
-    ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
-
-
---
--- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SD_PARAMS"
-    ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SERDE_PARAMS"
-    ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
-
-
---
--- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "SORT_COLS"
-    ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TABLE_PARAMS"
-    ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
-
-
---
--- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBLS"
-    ADD CONSTRAINT "TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
-
-
---
--- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBL_COL_PRIVS"
-    ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TBL_PRIVS"
-    ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-
-ALTER TABLE ONLY "TYPE_FIELDS"
-    ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE;
-
---
--- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
-
-
---
--- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
---
-ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
-
-
-ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
-
---
--- Name: public; Type: ACL; Schema: -; Owner: hiveuser
---
-
-REVOKE ALL ON SCHEMA public FROM PUBLIC;
-GRANT ALL ON SCHEMA public TO PUBLIC;
-
-
-INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '0.12.0', 'Hive release version 0.12.0');
---
--- PostgreSQL database dump complete
---
-
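
Note that the PostgreSQL variant above declares the same foreign keys
DEFERRABLE but leaves them INITIALLY IMMEDIATE, so the checks fire per
statement unless a session defers them explicitly. A minimal sketch,
assuming a transaction that needs to load child rows before their
parents:

    BEGIN;
    SET CONSTRAINTS ALL DEFERRED;  -- defer every DEFERRABLE constraint
    -- ... insert rows in whatever order is convenient ...
    COMMIT;                        -- deferred checks run here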

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/metainfo.xml
index 26edb84..3879149 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/metainfo.xml
@@ -20,275 +20,7 @@
   <services>
     <service>
       <name>HIVE</name>
-      <displayName>Hive</displayName>
-      <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-      <version>0.12.0.2.0</version>
-      <components>
-
-        <component>
-          <name>HIVE_METASTORE</name>
-          <displayName>Hive Metastore</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <clientsToUpdateConfigs></clientsToUpdateConfigs>
-          <auto-deploy>
-            <enabled>true</enabled>
-            <co-locate>HIVE/HIVE_SERVER</co-locate>
-          </auto-deploy>
-          <commandScript>
-            <script>scripts/hive_metastore.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <configuration-dependencies>
-            <config-type>hive-site</config-type>
-          </configuration-dependencies>
-        </component>
-
-        <component>
-          <name>HIVE_SERVER</name>
-          <displayName>HiveServer2</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <clientsToUpdateConfigs></clientsToUpdateConfigs>
-          <dependencies>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-                <co-locate>HIVE/HIVE_SERVER</co-locate>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>YARN/YARN_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/hive_server.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-          <configuration-dependencies>
-            <config-type>hive-site</config-type>
-          </configuration-dependencies>
-        </component>
-        <component>
-          <name>WEBHCAT_SERVER</name>
-          <displayName>WebHCat Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <clientsToUpdateConfigs>
-            <client>HCAT</client>
-          </clientsToUpdateConfigs>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-                <co-locate>HIVE/WEBHCAT_SERVER</co-locate>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>YARN/YARN_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>PIG/PIG</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/webhcat_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <configuration-dependencies>
-            <config-type>hive-site</config-type>
-          </configuration-dependencies>
-        </component>
-        <component>
-          <name>MYSQL_SERVER</name>
-          <displayName>MySQL Server</displayName>
-          <category>MASTER</category>
-          <cardinality>0-1</cardinality>
-          <clientsToUpdateConfigs></clientsToUpdateConfigs>
-          <commandScript>
-            <script>scripts/mysql_server.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>CLEAN</name>
-                <commandScript>
-                  <script>scripts/mysql_server.py</script>
-                  <scriptType>PYTHON</scriptType>
-                  <timeout>600</timeout>
-            </commandScript>
-                </customCommand>
-          </customCommands>
-        </component>
-
-        <component>
-          <name>HIVE_CLIENT</name>
-          <displayName>Hive Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/hive_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>hive-site.xml</fileName>
-              <dictionaryName>hive-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>hive-env.sh</fileName>
-              <dictionaryName>hive-env</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>hive-log4j.properties</fileName>
-              <dictionaryName>hive-log4j</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>hive-exec-log4j.properties</fileName>
-              <dictionaryName>hive-exec-log4j</dictionaryName>
-            </configFile>
-          </configFiles>
-          <configuration-dependencies>
-            <config-type>hive-site</config-type>
-          </configuration-dependencies>
-        </component>
-        <component>
-          <name>HCAT</name>
-          <displayName>HCat Client</displayName>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/hcat_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>env</type>
-              <fileName>hcat-env.sh</fileName>
-              <dictionaryName>hcat-env</dictionaryName>
-            </configFile>
-          </configFiles>
-        </component>
-        <configuration-dependencies>
-          <config-type>hive-site</config-type>
-        </configuration-dependencies>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>hive</name>
-            </package>
-            <package>
-              <name>hcatalog</name>
-            </package>
-            <package>
-              <name>webhcat-tar-hive</name>
-            </package>
-            <package>
-              <name>webhcat-tar-pig</name>
-            </package>
-            <package>
-              <name>mysql-connector-java</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>redhat5,redhat6,suse11</osFamily>
-          <packages>
-            <package>
-              <name>mysql</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>redhat5,redhat6,ubuntu12</osFamily>
-          <packages>
-            <package>
-              <name>mysql-server</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>suse11</osFamily>
-          <packages>
-            <package>
-              <name>mysql-client</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <requiredServices>
-        <service>ZOOKEEPER</service>
-        <service>YARN</service>
-        <service>TEZ</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>hive-log4j</config-type>
-        <config-type>hive-exec-log4j</config-type>
-        <config-type>hive-env</config-type>
-        <config-type>webhcat-site</config-type>
-        <config-type>webhcat-env</config-type>
-      </configuration-dependencies>
+      <extends>common-services/HIVE/0.12.0.2.0</extends>
     </service>
   </services>
 </metainfo>
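
The refactor collapses the stack-level HIVE definition to a single <extends> pointer; the components, osSpecifics, command scripts and configuration-dependencies deleted above are now inherited from the common-services definition it names. A minimal sketch of reading that pointer with the standard library (illustrative only, not Ambari's actual inheritance resolver):

import xml.etree.ElementTree as ET

def extends_target(metainfo_path):
    # Returns e.g. "common-services/HIVE/0.12.0.2.0", or None when the
    # service is still defined inline in the stack.
    root = ET.parse(metainfo_path).getroot()
    node = root.find("services/service/extends")
    return node.text.strip() if node is not None else None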

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/addMysqlUser.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/addMysqlUser.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/addMysqlUser.sh
deleted file mode 100644
index 4e8b968..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/addMysqlUser.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-mysqldservice=$1
-mysqldbuser=$2
-mysqldbpasswd=$3
-userhost=$4
-
-sudo service $mysqldservice start
-echo "Adding user $mysqldbuser@% and removing users with empty name"
-sudo su mysql -s /bin/bash - -c "mysql -u root -e \"CREATE USER '$mysqldbuser'@'%' IDENTIFIED BY '$mysqldbpasswd';\""
-sudo su mysql -s /bin/bash - -c "mysql -u root -e \"GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'%';\""
-sudo su mysql -s /bin/bash - -c "mysql -u root -e \"DELETE FROM mysql.user WHERE user='';\""
-sudo su mysql -s /bin/bash - -c "mysql -u root -e \"flush privileges;\""
-
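
The deleted helper above boots mysqld and provisions the Hive metastore account with four SQL statements run as root. A rough Python restatement of the same sequence, for illustration only (the real script runs the mysql client as the mysql user via sudo, which this sketch omits):

import subprocess
import sys

def add_mysql_user(service, user, password):
    subprocess.check_call(["service", service, "start"])
    for stmt in [
        "CREATE USER '%s'@'%%' IDENTIFIED BY '%s';" % (user, password),
        "GRANT ALL PRIVILEGES ON *.* TO '%s'@'%%';" % (user,),
        "DELETE FROM mysql.user WHERE user='';",
        "FLUSH PRIVILEGES;",
    ]:
        subprocess.check_call(["mysql", "-u", "root", "-e", stmt])

if __name__ == "__main__":
    add_mysql_user(sys.argv[1], sys.argv[2], sys.argv[3])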

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/alert_hive_thrift_port.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/alert_hive_thrift_port.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/alert_hive_thrift_port.py
deleted file mode 100644
index 499640f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/alert_hive_thrift_port.py
+++ /dev/null
@@ -1,127 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import json
-import socket
-import time
-import traceback
-import urllib2
-from resource_management.libraries.functions import hive_check
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.core.environment import Environment
-
-OK_MESSAGE = "TCP OK - %.4f response on port %s"
-CRITICAL_MESSAGE = "Connection failed on host {0}:{1}"
-
-HIVE_SERVER_THRIFT_PORT_KEY = '{{hive-site/hive.server2.thrift.port}}'
-SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
-HIVE_SERVER2_AUTHENTICATION_KEY = '{{hive-site/hive.server2.authentication}}'
-HIVE_SERVER_PRINCIPAL_KEY = '{{hive-site/hive.server2.authentication.kerberos.principal}}'
-SMOKEUSER_KEYTAB_KEY = '{{cluster-env/smokeuser_keytab}}'
-SMOKEUSER_KEY = '{{cluster-env/smokeuser}}'
-
-PERCENT_WARNING = 200
-PERCENT_CRITICAL = 200
-
-THRIFT_PORT_DEFAULT = 10000
-HIVE_SERVER_PRINCIPAL_DEFAULT = 'hive/_HOST@EXAMPLE.COM'
-HIVE_SERVER2_AUTHENTICATION_DEFAULT = 'NOSASL'
-SMOKEUSER_KEYTAB_DEFAULT = '/etc/security/keytabs/smokeuser.headless.keytab'
-SMOKEUSER_DEFAULT = 'ambari-qa'
-
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (HIVE_SERVER_THRIFT_PORT_KEY,SECURITY_ENABLED_KEY,HIVE_SERVER2_AUTHENTICATION_KEY,HIVE_SERVER_PRINCIPAL_KEY,SMOKEUSER_KEYTAB_KEY,SMOKEUSER_KEY)
-  
-
-def execute(parameters=None, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  parameters (dictionary): a mapping of parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-
-  if parameters is None:
-    return (('UNKNOWN', ['There were no parameters supplied to the script.']))
-
-  thrift_port = THRIFT_PORT_DEFAULT
-  if HIVE_SERVER_THRIFT_PORT_KEY in parameters:
-    thrift_port = int(parameters[HIVE_SERVER_THRIFT_PORT_KEY])  
-
-  security_enabled = False
-  if SECURITY_ENABLED_KEY in parameters:
-    security_enabled = str(parameters[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
-
-  hive_server2_authentication = HIVE_SERVER2_AUTHENTICATION_DEFAULT
-  if HIVE_SERVER2_AUTHENTICATION_KEY in parameters:
-    hive_server2_authentication = parameters[HIVE_SERVER2_AUTHENTICATION_KEY]
-
-  smokeuser = SMOKEUSER_DEFAULT
-  if SMOKEUSER_KEY in parameters:
-    smokeuser = parameters[SMOKEUSER_KEY]
-
-  result_code = None
-
-  if security_enabled:
-    hive_server_principal = HIVE_SERVER_PRINCIPAL_DEFAULT
-    if HIVE_SERVER_PRINCIPAL_KEY in parameters:
-      hive_server_principal = parameters[HIVE_SERVER_PRINCIPAL_KEY]
-    smokeuser_keytab = SMOKEUSER_KEYTAB_DEFAULT
-    if SMOKEUSER_KEYTAB_KEY in parameters:
-      smokeuser_keytab = parameters[SMOKEUSER_KEYTAB_KEY]
-    with Environment() as env:
-      kinit_path_local = get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-      kinitcmd=format("{kinit_path_local} -kt {smokeuser_keytab} {smokeuser}; ")
-  else:
-    hive_server_principal = None
-    kinitcmd=None
-
-  try:
-    if host_name is None:
-      host_name = socket.getfqdn()
-
-    start_time = time.time()
-    try:
-      with Environment() as env:
-        hive_check.check_thrift_port_sasl(host_name, thrift_port, hive_server2_authentication,
-                                          hive_server_principal, kinitcmd, smokeuser)
-      is_thrift_port_ok = True
-    except:
-      is_thrift_port_ok = False
-
-    if is_thrift_port_ok:
-      result_code = 'OK'
-      total_time = time.time() - start_time
-      label = OK_MESSAGE % (total_time, thrift_port)
-    else:
-      result_code = 'CRITICAL'
-      label = CRITICAL_MESSAGE.format(host_name,thrift_port)
-
-  except Exception, e:
-    label = str(e)
-    result_code = 'UNKNOWN'
-        
-  return ((result_code, [label]))
\ No newline at end of file
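
When security is off, the alert above effectively reports whether a timed connection to the thrift port succeeds, using the OK/CRITICAL messages defined at the top of the module. A self-contained approximation of that contract (the real check goes through hive_check.check_thrift_port_sasl, which this sketch does not reproduce):

import socket
import time

def check_thrift_port(host, port=10000):
    start = time.time()
    try:
        socket.create_connection((host, port), timeout=5).close()
    except socket.error:
        return ('CRITICAL', ['Connection failed on host %s:%s' % (host, port)])
    return ('OK', ['TCP OK - %.4f response on port %s' % (time.time() - start, port)])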

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/alert_webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/alert_webhcat_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/alert_webhcat_server.py
deleted file mode 100644
index 44840de..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/alert_webhcat_server.py
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import json
-import socket
-import time
-import urllib2
-
-RESULT_CODE_OK = 'OK'
-RESULT_CODE_CRITICAL = 'CRITICAL'
-RESULT_CODE_UNKNOWN = 'UNKNOWN'
-
-OK_MESSAGE = 'TCP OK - {0:.4f} response on port {1}'
-CRITICAL_CONNECTION_MESSAGE = 'Connection failed on host {0}:{1}'
-CRITICAL_TEMPLETON_STATUS_MESSAGE = 'WebHCat returned an unexpected status of "{0}"'
-CRITICAL_TEMPLETON_UNKNOWN_JSON_MESSAGE = 'Unable to determine WebHCat health from unexpected JSON response'
-
-TEMPLETON_PORT_KEY = '{{webhcat-site/templeton.port}}'
-SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
-
-TEMPLETON_OK_RESPONSE = 'ok'
-TEMPLETON_PORT_DEFAULT = 50111
-
-def get_tokens():
-  """
-  Returns a tuple of tokens in the format {{site/property}} that will be used
-  to build the dictionary passed into execute
-  """
-  return (TEMPLETON_PORT_KEY,SECURITY_ENABLED_KEY)      
-  
-
-def execute(parameters=None, host_name=None):
-  """
-  Returns a tuple containing the result code and a pre-formatted result label
-
-  Keyword arguments:
-  parameters (dictionary): a mapping of parameter key to value
-  host_name (string): the name of this host where the alert is running
-  """
-
-  result_code = RESULT_CODE_UNKNOWN
-
-  if parameters is None:
-    return (result_code, ['There were no parameters supplied to the script.'])
-
-  templeton_port = TEMPLETON_PORT_DEFAULT
-  if TEMPLETON_PORT_KEY in parameters:
-    templeton_port = int(parameters[TEMPLETON_PORT_KEY])  
-
-  security_enabled = False
-  if SECURITY_ENABLED_KEY in parameters:
-    security_enabled = parameters[SECURITY_ENABLED_KEY].lower() == 'true'
-
-  scheme = 'http'
-  if security_enabled is True:
-    scheme = 'https'
-
-  label = ''
-  url_response = None
-  templeton_status = ''
-  total_time = 0
-
-  try:
-    # the alert will always run on the webhcat host
-    if host_name is None:
-      host_name = socket.getfqdn()
-    
-    query = "{0}://{1}:{2}/templeton/v1/status".format(scheme, host_name,
-        templeton_port)
-    
-    # execute the query for the JSON that includes templeton status
-    start_time = time.time()
-    url_response = urllib2.urlopen(query)
-    total_time = time.time() - start_time
-  except:
-    label = CRITICAL_CONNECTION_MESSAGE.format(host_name,templeton_port)
-    return (RESULT_CODE_CRITICAL, [label])
-
-  # URL response received, parse it
-  try:
-    json_response = json.loads(url_response.read())
-    templeton_status = json_response['status']
-  except:
-    return (RESULT_CODE_CRITICAL, [CRITICAL_TEMPLETON_UNKNOWN_JSON_MESSAGE])
-
-  # proper JSON received, compare against known value
-  if templeton_status.lower() == TEMPLETON_OK_RESPONSE:
-    result_code = RESULT_CODE_OK
-    label = OK_MESSAGE.format(total_time, templeton_port)
-  else:
-    result_code = RESULT_CODE_CRITICAL
-    label = CRITICAL_TEMPLETON_STATUS_MESSAGE.format(templeton_status)
-
-  return (result_code, [label])
\ No newline at end of file
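
The probe above reduces to one HTTP GET against the templeton status endpoint plus a comparison of the JSON "status" field against "ok". The same check stripped to its essentials (Python 2 urllib2, matching the deleted script):

import json
import urllib2

def webhcat_ok(host, port=50111, scheme='http'):
    url = '%s://%s:%s/templeton/v1/status' % (scheme, host, port)
    response = json.loads(urllib2.urlopen(url).read())
    return response.get('status', '').lower() == 'ok'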


[02/37] ambari git commit: AMBARI-8695: Common Services: Refactor HDP-2.0.6 HDFS, ZOOKEEPER services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/configuration/zoo.cfg.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/configuration/zoo.cfg.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/configuration/zoo.cfg.xml
deleted file mode 100644
index 12e2a00..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/configuration/zoo.cfg.xml
+++ /dev/null
@@ -1,62 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>tickTime</name>
-    <value>2000</value>
-    <description>The length of a single tick in milliseconds, which is the basic time unit used by ZooKeeper</description>
-  </property>
-  <property>
-    <name>initLimit</name>
-    <value>10</value>
-    <description>Ticks to allow for sync at Init.</description>
-  </property>
-  <property>
-    <name>syncLimit</name>
-    <value>5</value>
-    <description>Ticks to allow for sync at Runtime.</description>
-  </property>
-  <property>
-    <name>clientPort</name>
-    <value>2181</value>
-    <description>Port for running ZK Server.</description>
-  </property>
-  <property>
-    <name>dataDir</name>
-    <value>/hadoop/zookeeper</value>
-    <description>Data directory for ZooKeeper.</description>
-  </property>
-  <property>
-    <name>autopurge.snapRetainCount</name>
-    <value>30</value>
-    <description>ZooKeeper purge feature retains the autopurge.snapRetainCount
-      most recent snapshots and the corresponding transaction
-      logs in the dataDir and dataLogDir respectively and deletes the rest. </description>
-  </property>
-  <property>
-    <name>autopurge.purgeInterval</name>
-    <value>24</value>
-    <description>The time interval in hours for which the purge task has to be triggered.
-      Set to a positive integer (1 and above) to enable the auto purging.</description>
-  </property>
-</configuration>
\ No newline at end of file
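
These properties are not consumed as XML at runtime; Ambari flattens the zoo.cfg dictionary into the key=value file ZooKeeper actually reads. A minimal sketch of that rendering (the real zoo.cfg.j2 template also appends the server.N quorum entries, which this sketch leaves out):

def render_zoo_cfg(properties):
    return '\n'.join('%s=%s' % (k, v) for k, v in sorted(properties.items()))

print(render_zoo_cfg({
    'tickTime': 2000,
    'initLimit': 10,
    'syncLimit': 5,
    'clientPort': 2181,
    'dataDir': '/hadoop/zookeeper',
}))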

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/configuration/zookeeper-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/configuration/zookeeper-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/configuration/zookeeper-env.xml
deleted file mode 100644
index 608f504..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/configuration/zookeeper-env.xml
+++ /dev/null
@@ -1,61 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>zk_user</name>
-    <value>zookeeper</value>
-    <property-type>USER</property-type>
-    <description>ZooKeeper User.</description>
-  </property>
-  <property>
-    <name>zk_log_dir</name>
-    <value>/var/log/zookeeper</value>
-    <description>ZooKeeper Log Dir</description>
-  </property>
-  <property>
-    <name>zk_pid_dir</name>
-    <value>/var/run/zookeeper</value>
-    <description>ZooKeeper Pid Dir</description>
-  </property>
-
-  
-  <!-- zookeeper-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the jinja template for zookeeper-env.sh file</description>
-    <value>
-export JAVA_HOME={{java64_home}}
-export ZOOKEEPER_HOME={{zk_home}}
-export ZOO_LOG_DIR={{zk_log_dir}}
-export ZOOPIDFILE={{zk_pid_file}}
-export SERVER_JVMFLAGS={{zk_server_heapsize}}
-export JAVA=$JAVA_HOME/bin/java
-export CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*
-
-{% if security_enabled %}
-export SERVER_JVMFLAGS="$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}"
-export CLIENT_JVMFLAGS="$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}"
-{% endif %}
-    </value>
-  </property>
-</configuration>
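
The <content> property is a Jinja template that Ambari renders through InlineTemplate with values from params.py. A standalone approximation with plain jinja2, using an abbreviated template body and illustrative values:

from jinja2 import Template

template_body = (
    "export ZOO_LOG_DIR={{zk_log_dir}}\n"
    "export ZOOPIDFILE={{zk_pid_file}}\n"
)
print(Template(template_body).render(
    zk_log_dir='/var/log/zookeeper',
    zk_pid_file='/var/run/zookeeper/zookeeper_server.pid',
))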

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/configuration/zookeeper-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
deleted file mode 100644
index 6fcf5bc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/configuration/zookeeper-log4j.xml
+++ /dev/null
@@ -1,101 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>content</name>
-    <description>Custom log4j.properties</description>
-    <value>
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-#
-# ZooKeeper Logging Configuration
-#
-
-# DEFAULT: console appender only
-log4j.rootLogger=INFO, CONSOLE
-
-# Example with rolling log file
-#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
-
-# Example with rolling log file and tracing
-#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
-
-#
-# Log INFO level and above messages to the console
-#
-log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
-log4j.appender.CONSOLE.Threshold=INFO
-log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
-log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
-
-#
-# Add ROLLINGFILE to rootLogger to get log file output
-#    Log DEBUG level and above messages to a log file
-log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
-log4j.appender.ROLLINGFILE.Threshold=DEBUG
-log4j.appender.ROLLINGFILE.File=zookeeper.log
-
-# Max log file size of 10MB
-log4j.appender.ROLLINGFILE.MaxFileSize=10MB
-# uncomment the next line to limit number of backup files
-#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
-
-log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
-log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
-
-
-#
-# Add TRACEFILE to rootLogger to get log file output
-#    Log DEBUG level and above messages to a log file
-log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
-log4j.appender.TRACEFILE.Threshold=TRACE
-log4j.appender.TRACEFILE.File=zookeeper_trace.log
-
-log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
-### Notice we are including log4j's NDC here (%x)
-log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n
-    </value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/metainfo.xml
index 0cb65ea..06956a5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/metainfo.xml
@@ -20,70 +20,7 @@
   <services>
     <service>
       <name>ZOOKEEPER</name>
-      <displayName>ZooKeeper</displayName>
-      <comment>Centralized service which provides highly reliable distributed coordination</comment>
-      <version>3.4.5.2.0</version>
-      <components>
-
-        <component>
-          <name>ZOOKEEPER_SERVER</name>
-          <displayName>ZooKeeper Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/zookeeper_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>ZOOKEEPER_CLIENT</name>
-          <displayName>ZooKeeper Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/zookeeper_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>env</type>
-              <fileName>zookeeper-env.sh</fileName>
-              <dictionaryName>zookeeper-env</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>log4j.properties</fileName>
-              <dictionaryName>zookeeper-log4j</dictionaryName>
-            </configFile>            
-          </configFiles>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>zookeeper</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>zookeeper-log4j</config-type>
-        <config-type>zookeeper-env</config-type>
-        <config-type>zoo.cfg</config-type>
-      </configuration-dependencies>
-      <restartRequiredAfterChange>true</restartRequiredAfterChange>
+      <extends>common-services/ZOOKEEPER/3.4.5.2.0</extends>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkEnv.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkEnv.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkEnv.sh
deleted file mode 100644
index fa1b832..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkEnv.sh
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This script should be sourced into other zookeeper
-# scripts to setup the env variables
-
-# We use ZOOCFGDIR if defined,
-# otherwise we use /etc/zookeeper
-# or the conf directory that is
-# a sibling of this script's directory
-if [ "x$ZOOCFGDIR" = "x" ]
-then
-    if [ -d "/etc/zookeeper" ]
-    then
-        ZOOCFGDIR="/etc/zookeeper"
-    else
-        ZOOCFGDIR="$ZOOBINDIR/../conf"
-    fi
-fi
-
-if [ "x$ZOOCFG" = "x" ]
-then
-    ZOOCFG="zoo.cfg"
-fi
-
-ZOOCFG="$ZOOCFGDIR/$ZOOCFG"
-
-if [ -e "$ZOOCFGDIR/zookeeper-env.sh" ]
-then
-    . "$ZOOCFGDIR/zookeeper-env.sh"
-fi
-
-if [ "x${ZOO_LOG_DIR}" = "x" ]
-then
-    ZOO_LOG_DIR="."
-fi
-
-if [ "x${ZOO_LOG4J_PROP}" = "x" ]
-then
-    ZOO_LOG4J_PROP="INFO,CONSOLE"
-fi
-
-#add the zoocfg dir to classpath
-CLASSPATH="$ZOOCFGDIR:$CLASSPATH"
-
-for i in "$ZOOBINDIR"/../src/java/lib/*.jar
-do
-    CLASSPATH="$i:$CLASSPATH"
-done
-
-#make it work in the release
-for i in "$ZOOBINDIR"/../lib/*.jar
-do
-    CLASSPATH="$i:$CLASSPATH"
-done
-
-#make it work in the release
-for i in "$ZOOBINDIR"/../zookeeper-*.jar
-do
-    CLASSPATH="$i:$CLASSPATH"
-done
-
-#make it work for developers
-for d in "$ZOOBINDIR"/../build/lib/*.jar
-do
-   CLASSPATH="$d:$CLASSPATH"
-done
-
-#make it work for developers
-CLASSPATH="$ZOOBINDIR/../build/classes:$CLASSPATH"
-
-case "`uname`" in
-    CYGWIN*) cygwin=true ;;
-    *) cygwin=false ;;
-esac
-
-if $cygwin
-then
-    CLASSPATH=`cygpath -wp "$CLASSPATH"`
-fi
-
-#echo "CLASSPATH=$CLASSPATH"

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkServer.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkServer.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkServer.sh
deleted file mode 100644
index dd75a58..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkServer.sh
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# If this script is run out of /usr/bin or some other system bin directory
-# it should be linked to and not copied. Things like java jar files are found
-# relative to the canonical path of this script.
-#
-
-# See the following page for extensive details on setting
-# up the JVM to accept JMX remote management:
-# http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
-# by default we allow local JMX connections
-if [ "x$JMXLOCALONLY" = "x" ]
-then
-    JMXLOCALONLY=false
-fi
-
-if [ "x$JMXDISABLE" = "x" ]
-then
-    echo "JMX enabled by default"
-    # for some reason these two options are necessary on jdk6 on Ubuntu
-    #   according to the docs they are not necessary, but otherwise jconsole cannot
-    #   do a local attach
-    ZOOMAIN="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY org.apache.zookeeper.server.quorum.QuorumPeerMain"
-else
-    echo "JMX disabled by user request"
-    ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
-fi
-
-# Only follow symlinks if readlink supports it
-if readlink -f "$0" > /dev/null 2>&1
-then
-  ZOOBIN=`readlink -f "$0"`
-else
-  ZOOBIN="$0"
-fi
-ZOOBINDIR=`dirname "$ZOOBIN"`
-
-. "$ZOOBINDIR"/zkEnv.sh
-
-if [ "x$2" != "x" ]
-then
-    ZOOCFG="$ZOOCFGDIR/$2"
-fi
-
-if $cygwin
-then
-    ZOOCFG=`cygpath -wp "$ZOOCFG"`
-    # cygwin has a "kill" in the shell itself, gets confused
-    KILL=/bin/kill
-else
-    KILL=kill
-fi
-
-echo "Using config: $ZOOCFG"
-
-ZOOPIDFILE=$(grep dataDir "$ZOOCFG" | sed -e 's/.*=//')/zookeeper_server.pid
-
-
-case $1 in
-start)
-    echo  "Starting zookeeper ... "
-    $JAVA  "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
-    -cp "$CLASSPATH" $JVMFLAGS $ZOOMAIN "$ZOOCFG" &
-    /bin/echo -n $! > "$ZOOPIDFILE"
-    echo STARTED
-    ;;
-stop)
-    echo "Stopping zookeeper ... "
-    if [ ! -f "$ZOOPIDFILE" ]
-    then
-    echo "error: could not find file $ZOOPIDFILE"
-    exit 1
-    else
-    $KILL -9 $(cat "$ZOOPIDFILE")
-    rm "$ZOOPIDFILE"
-    echo STOPPED
-    fi
-    ;;
-upgrade)
-    shift
-    echo "upgrading the servers to 3.*"
-    java "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
-    -cp "$CLASSPATH" $JVMFLAGS org.apache.zookeeper.server.upgrade.UpgradeMain ${@}
-    echo "Upgrading ... "
-    ;;
-restart)
-    shift
-    "$0" stop ${@}
-    sleep 3
-    "$0" start ${@}
-    ;;
-status)
-    STAT=`echo stat | nc localhost $(grep clientPort "$ZOOCFG" | sed -e 's/.*=//') 2> /dev/null| grep Mode`
-    if [ "x$STAT" = "x" ]
-    then
-        echo "Error contacting service. It is probably not running."
-    else
-        echo $STAT
-    fi
-    ;;
-*)
-    echo "Usage: $0 {start|stop|restart|status}" >&2
-
-esac
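
The status) branch above is simply the ZooKeeper "stat" four-letter command sent over TCP (the script pipes it through nc). The same probe with only the Python standard library; a production check would loop on recv until EOF, which this sketch skips:

import socket

def zk_mode(host='localhost', port=2181):
    s = socket.create_connection((host, port), timeout=5)
    try:
        s.sendall(b'stat')
        data = s.recv(4096).decode('utf-8', 'replace')
    finally:
        s.close()
    for line in data.splitlines():
        if line.startswith('Mode'):
            return line        # e.g. "Mode: follower"
    return None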

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkService.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkService.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkService.sh
deleted file mode 100644
index 46296df..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkService.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-zkcli_script=$1
-user=$2
-conf_dir=$3
-sudo su $user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | $zkcli_script"

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkSmoke.sh
deleted file mode 100644
index 04f5a35..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/files/zkSmoke.sh
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-zk_cli_shell=$1
-smoke_user=$2
-conf_dir=$3
-client_port=$4
-security_enabled=$5
-kinit_path_local=$6
-smoke_user_keytab=$7
-export ZOOKEEPER_EXIT_CODE=0
-test_output_file=/tmp/zkSmoke.out
-errors_expr="ERROR|Exception"
-acceptable_expr="SecurityException"
-zkhosts=` grep "^\s*server\.[[:digit:]]"  $conf_dir/zoo.cfg  | cut -f 2 -d '=' | cut -f 1 -d ':' | tr '\n' ' ' `
-zk_node1=`echo $zkhosts | tr ' ' '\n' | head -n 1`  
-echo "zk_node1=$zk_node1"
-if [[ $security_enabled == "True" ]]; then
-  kinitcmd="$kinit_path_local -kt $smoke_user_keytab $smoke_user"
-  sudo su $smoke_user -s /bin/bash - -c "$kinitcmd"
-fi
-
-function verify_output() {
-  if [ -f $test_output_file ]; then
-    errors=`grep -E $errors_expr $test_output_file | grep -v $acceptable_expr`
-    if [ "$?" -eq 0 ]; then
-      echo "Error found in the zookeeper smoke test. Exiting."
-      echo $errors
-      exit 1
-    fi
-  fi
-}
-
-# Delete /zk_smoketest znode if exists
-sudo su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ;  echo delete /zk_smoketest | ${zk_cli_shell} -server $zk_node1:$client_port" 2>&1>$test_output_file
-# Create /zk_smoketest znode on one zookeeper server
-sudo su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo create /zk_smoketest smoke_data | ${zk_cli_shell} -server $zk_node1:$client_port" 2>&1>>$test_output_file
-verify_output
-
-for i in $zkhosts ; do
-  echo "Running test on host $i"
-  # Verify the data associated with znode across all the nodes in the zookeeper quorum
-  sudo su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${zk_cli_shell} -server $i:$client_port"
-  sudo su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | ${zk_cli_shell} -server $i:$client_port"
-  output=$(sudo su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${zk_cli_shell} -server $i:$client_port")
-  echo $output | grep smoke_data
-  if [[ $? -ne 0 ]] ; then
-    echo "Data associated with znode /zk_smoketests is not consistent on host $i"
-    ((ZOOKEEPER_EXIT_CODE=$ZOOKEEPER_EXIT_CODE+1))
-  fi
-done
-
-sudo su $smoke_user -s /bin/bash - -c "source $conf_dir/zookeeper-env.sh ; echo 'delete /zk_smoketest' | ${zk_cli_shell} -server $zk_node1:$client_port"
-if [[ "$ZOOKEEPER_EXIT_CODE" -ne "0" ]] ; then
-  echo "Zookeeper Smoke Test: Failed" 
-else
-   echo "Zookeeper Smoke Test: Passed" 
-fi
-exit $ZOOKEEPER_EXIT_CODE
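
The smoke test's core assertion is create-on-one, read-on-all: write /zk_smoketest through the first server, then verify every quorum member returns the same data. Restated with the kazoo client library, which the script does not use (it shells out to zkCli.sh); illustration only:

from kazoo.client import KazooClient

def smoke(hosts, client_port=2181):
    zk = KazooClient(hosts='%s:%d' % (hosts[0], client_port))
    zk.start()
    if zk.exists('/zk_smoketest'):
        zk.delete('/zk_smoketest')
    zk.create('/zk_smoketest', b'smoke_data')
    zk.stop()
    failures = 0
    for host in hosts:
        peer = KazooClient(hosts='%s:%d' % (host, client_port))
        peer.start()
        data, _ = peer.get('/zk_smoketest')
        if data != b'smoke_data':
            failures += 1
        peer.stop()
    return failures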

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/__init__.py
deleted file mode 100644
index 35de4bb..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/params.py
deleted file mode 100644
index b52d6e3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/params.py
+++ /dev/null
@@ -1,88 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
-from resource_management.libraries.functions.default import default
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-hdp_stack_version = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(hdp_stack_version)
-
-# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
-version = default("/commandParams/version", None)
-
-#hadoop params
-if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
-  zk_home = '/usr/hdp/current/zookeeper-client'
-  zk_bin = '/usr/hdp/current/zookeeper-client/bin'    # TODO Rolling Upgrade, needs to be server binary when starting server daemon...
-  zk_cli_shell = '/usr/hdp/current/zookeeper-client/bin/zkCli.sh'
-else:
-  zk_home = '/usr'
-  zk_bin = '/usr/lib/zookeeper/bin'
-  zk_cli_shell = "/usr/lib/zookeeper/bin/zkCli.sh"
-
-config_dir = "/etc/zookeeper/conf"
-zk_user =  config['configurations']['zookeeper-env']['zk_user']
-hostname = config['hostname']
-user_group = config['configurations']['cluster-env']['user_group']
-zk_env_sh_template = config['configurations']['zookeeper-env']['content']
-
-zk_log_dir = config['configurations']['zookeeper-env']['zk_log_dir']
-zk_data_dir = config['configurations']['zoo.cfg']['dataDir']
-zk_pid_dir = status_params.zk_pid_dir
-zk_pid_file = status_params.zk_pid_file
-zk_server_heapsize = "-Xmx1024m"
-
-clientPort = config['configurations']['zoo.cfg']['clientPort']
-
-if 'zoo.cfg' in config['configurations']:
-  zoo_cfg_properties_map = config['configurations']['zoo.cfg']
-else:
-  zoo_cfg_properties_map = {}
-zoo_cfg_properties_map_length = len(zoo_cfg_properties_map)
-
-zk_principal_name = default("/configurations/zookeeper-env/zookeeper_principal_name", "zookeeper@EXAMPLE.COM")
-zk_principal = zk_principal_name.replace('_HOST',hostname.lower())
-
-java64_home = config['hostLevelParams']['java_home']
-
-zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
-zookeeper_hosts.sort()
-
-zk_keytab_path = config['configurations']['zookeeper-env']['zookeeper_keytab_path']
-zk_server_jaas_file = format("{config_dir}/zookeeper_jaas.conf")
-zk_client_jaas_file = format("{config_dir}/zookeeper_client_jaas.conf")
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-
-#log4j.properties
-if (('zookeeper-log4j' in config['configurations']) and ('content' in config['configurations']['zookeeper-log4j'])):
-  log4j_props = config['configurations']['zookeeper-log4j']['content']
-else:
-  log4j_props = None
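
The path selection above hinges on a dotted-version comparison against '2.2' to decide between /usr/hdp/current and the legacy /usr/lib layout. A minimal stand-in for compare_versions (the real implementation lives in resource_management and handles more formats):

def compare_versions(a, b):
    pa = [int(x) for x in a.split('.')]
    pb = [int(x) for x in b.split('.')]
    return (pa > pb) - (pa < pb)

# '2.2.0.0' >= '2.2' selects the /usr/hdp/current client paths:
zk_bin = ('/usr/hdp/current/zookeeper-client/bin'
          if compare_versions('2.2.0.0', '2.2') >= 0
          else '/usr/lib/zookeeper/bin')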

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/service_check.py
deleted file mode 100644
index a4efa41..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/service_check.py
+++ /dev/null
@@ -1,46 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-class ZookeeperServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    File(format("{tmp_dir}/zkSmoke.sh"),
-         mode=0755,
-         content=StaticFile('zkSmoke.sh')
-    )
-
-    cmd_quorum = format("{tmp_dir}/zkSmoke.sh {zk_cli_shell} {smokeuser} {config_dir} {clientPort} "
-                  "{security_enabled} {kinit_path_local} {smokeUserKeytab}",
-                  smokeUserKeytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
-
-    Execute(cmd_quorum,
-            tries=3,
-            try_sleep=5,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-            logoutput=True
-    )
-
-if __name__ == "__main__":
-  ZookeeperServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/status_params.py
deleted file mode 100644
index 36c5c30..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/status_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-zk_pid_dir = config['configurations']['zookeeper-env']['zk_pid_dir']
-zk_pid_file = format("{zk_pid_dir}/zookeeper_server.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper.py
deleted file mode 100644
index d03aaf7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper.py
+++ /dev/null
@@ -1,111 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import os
-
-from resource_management import *
-import sys
-
-
-def zookeeper(type = None):
-  import params
-
-  Directory(params.config_dir,
-            owner=params.zk_user,
-            recursive=True,
-            group=params.user_group
-  )
-
-  File(format("{config_dir}/zookeeper-env.sh"),
-       content=InlineTemplate(params.zk_env_sh_template),
-       owner=params.zk_user,
-       group=params.user_group
-  )
-  
-
-  configFile("zoo.cfg", template_name="zoo.cfg.j2")
-  configFile("configuration.xsl", template_name="configuration.xsl.j2")
-
-  Directory(params.zk_pid_dir,
-            owner=params.zk_user,
-            recursive=True,
-            group=params.user_group
-  )
-
-  Directory(params.zk_log_dir,
-            owner=params.zk_user,
-            recursive=True,
-            group=params.user_group
-  )
-
-  Directory(params.zk_data_dir,
-            owner=params.zk_user,
-            recursive=True,
-            recursive_permission=True,
-            group=params.user_group
-  )
-
-  if type == 'server':
-    myid = str(sorted(params.zookeeper_hosts).index(params.hostname) + 1)
-
-    File(format("{zk_data_dir}/myid"),
-         mode = 0644,
-         content = myid
-    )
-
-  if (params.log4j_props != None):
-    File(format("{params.config_dir}/log4j.properties"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.zk_user,
-         content=params.log4j_props
-    )
-  elif (os.path.exists(format("{params.config_dir}/log4j.properties"))):
-    File(format("{params.config_dir}/log4j.properties"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.zk_user
-    )
-
-  if params.security_enabled:
-    if type == "server":
-      configFile("zookeeper_jaas.conf", template_name="zookeeper_jaas.conf.j2")
-      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")
-    else:
-      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")
-
-  File(format("{config_dir}/zoo_sample.cfg"),
-       owner=params.zk_user,
-       group=params.user_group
-  )
-
-
-def configFile(name, template_name=None):
-  import params
-
-  File(format("{config_dir}/{name}"),
-       content=Template(template_name),
-       owner=params.zk_user,
-       group=params.user_group
-  )
-
-
-
-
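
One detail worth calling out in the server branch above: each host's myid is its 1-based position in the sorted quorum list, so every node derives the same numbering without coordination. The same computation in isolation, with illustrative hostnames:

zookeeper_hosts = ['c6403.ambari.apache.org', 'c6401.ambari.apache.org',
                   'c6402.ambari.apache.org']
hostname = 'c6402.ambari.apache.org'
myid = str(sorted(zookeeper_hosts).index(hostname) + 1)
print(myid)  # -> '2'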

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_client.py
deleted file mode 100644
index 4bffac3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_client.py
+++ /dev/null
@@ -1,42 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from zookeeper import zookeeper
-
-class ZookeeperClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    zookeeper(type='client')
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  ZookeeperClient().execute()
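
The client script above never starts a daemon, which is why status() raises ClientComponentHasNoStatus. The final execute() call is a command dispatcher: the agent names a lifecycle command and the Script base class routes it to the matching method. A stripped-down sketch of that dispatch idea (an assumption about the shape of the mechanism, not resource_management's actual implementation):

    import sys

    class MiniScript(object):
        def execute(self):
            # Hypothetical: the agent passes the command name on the
            # command line; dispatch maps it onto the same-named method.
            command = sys.argv[1].lower()
            getattr(self, command)()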

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_server.py
deleted file mode 100644
index cc828a4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_server.py
+++ /dev/null
@@ -1,100 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import re
-
-from resource_management import *
-from resource_management.libraries.functions import get_unique_id_and_date
-from resource_management.libraries.functions.decorator import retry
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
-from resource_management.libraries.functions.format import format
-from resource_management.core.shell import call
-
-from zookeeper import zookeeper
-from zookeeper_service import zookeeper_service
-
-
-@retry(times=10, sleep_time=2, err_class=Fail)
-def call_and_match_output(command, regex_expression, err_message):
-  """
-  Call the command and performs a regex match on the output for the specified expression.
-  :param command: Command to call
-  :param regex_expression: Regex expression to search in the output
-  """
-  # TODO Rolling Upgrade, does this work in Ubuntu? If it doesn't see dynamic_variable_interpretation.py to see how stdout was redirected
-  # to a temporary file, which was then read.
-  code, out = call(command, verbose=True)
-  if not (out and re.search(regex_expression, out, re.IGNORECASE)):
-    raise Fail(err_message)
-
-
-class ZookeeperServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    zookeeper(type='server')
-
-  def pre_rolling_restart(self, env):
-    Logger.info("Executing Rolling Upgrade pre-restart")
-    import params
-    env.set_params(params)
-
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
-      Execute(format("hdp-select set zookeeper-server {version}"))
-
-  def start(self, env, rolling_restart=False):
-    import params
-    env.set_params(params)
-    self.configure(env)
-    zookeeper_service(action = 'start')
-
-  def post_rolling_restart(self, env):
-    Logger.info("Executing Rolling Upgrade post-restart")
-    import params
-    env.set_params(params)
-
-    # Ensure that a quorum is still formed.
-    unique = get_unique_id_and_date()
-    create_command = format("echo 'create /{unique} mydata' | {zk_cli_shell}")
-    list_command = format("echo 'ls /' | {zk_cli_shell}")
-    delete_command = format("echo 'delete /{unique} ' | {zk_cli_shell}")
-
-    quorum_err_message = "Failed to establish zookeeper quorum"
-    call_and_match_output(create_command, 'Created', quorum_err_message)
-    call_and_match_output(list_command, r"\[.*?" + unique + ".*?\]", quorum_err_message)
-    call(delete_command, verbose=True)
-
-  def stop(self, env, rolling_restart=False):
-    import params
-    env.set_params(params)
-    zookeeper_service(action = 'stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.zk_pid_file)
-
-if __name__ == "__main__":
-  ZookeeperServer().execute()
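
The post_rolling_restart check above verifies the ensemble by creating, listing, and deleting a uniquely named znode through zkCli, retrying via @retry until the output matches. A rough plain-Python equivalent of that retry-and-match loop (subprocess stands in for resource_management's call; this is a sketch, not the original helper):

    import re
    import subprocess
    import time

    def call_and_match_output(command, regex_expression, err_message,
                              times=10, sleep_time=2):
        # Mirrors @retry(times=10, sleep_time=2, err_class=Fail):
        # rerun the command until its output matches the pattern.
        for _ in range(times):
            p = subprocess.Popen(command, shell=True,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT)
            out = p.communicate()[0].decode("utf-8", "replace")
            if out and re.search(regex_expression, out, re.IGNORECASE):
                return
            time.sleep(sleep_time)
        raise RuntimeError(err_message)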

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_service.py
deleted file mode 100644
index 1495163..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/zookeeper_service.py
+++ /dev/null
@@ -1,50 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-def zookeeper_service(action='start'):
-  import params
-
-  cmd = format("env ZOOCFGDIR={config_dir} ZOOCFG=zoo.cfg {zk_bin}/zkServer.sh")
-
-  if action == 'start':
-    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} start")
-    no_op_test = format("ls {zk_pid_file} >/dev/null 2>&1 && ps -p `cat {zk_pid_file}` >/dev/null 2>&1")
-    Execute(daemon_cmd,
-            not_if=no_op_test,
-            user=params.zk_user
-    )
-
-    if params.security_enabled:
-      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
-
-      Execute(kinit_cmd,
-              user=params.smokeuser
-      )
-
-  elif action == 'stop':
-    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} stop")
-    rm_pid = format("rm -f {zk_pid_file}")
-    Execute(daemon_cmd,
-            user=params.zk_user
-    )
-    Execute(rm_pid)
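
The start action above is idempotent thanks to the not_if guard: zkServer.sh start is skipped whenever the pid file exists and names a live process. The same liveness probe, sketched as a standalone function:

    import os

    def zookeeper_is_running(zk_pid_file):
        # Equivalent of: ls {zk_pid_file} && ps -p `cat {zk_pid_file}`
        try:
            with open(zk_pid_file) as f:
                pid = int(f.read().strip())
            os.kill(pid, 0)  # signal 0 probes for existence only
            return True
        except (IOError, OSError, ValueError):
            return False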

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/configuration.xsl.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/configuration.xsl.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/configuration.xsl.j2
deleted file mode 100644
index 8830c45..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/configuration.xsl.j2
+++ /dev/null
@@ -1,42 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-<?xml version="1.0"?>
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-<xsl:output method="html"/>
-<xsl:template match="configuration">
-<html>
-<body>
-<table border="1">
-<tr>
- <td>name</td>
- <td>value</td>
- <td>description</td>
-</tr>
-<xsl:for-each select="property">
-  <tr>
-     <td><a name="{name}"><xsl:value-of select="name"/></a></td>
-     <td><xsl:value-of select="value"/></td>
-     <td><xsl:value-of select="description"/></td>
-  </tr>
-</xsl:for-each>
-</table>
-</body>
-</html>
-</xsl:template>
-</xsl:stylesheet>

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zoo.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zoo.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zoo.cfg.j2
deleted file mode 100644
index 1fbc0cc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zoo.cfg.j2
+++ /dev/null
@@ -1,53 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-{% for key, value in zoo_cfg_properties_map.iteritems() -%}
-  {{key}}={{value}}
-{% endfor %}
-{% for host in zookeeper_hosts -%}
-server.{{loop.index}}={{host}}:2888:3888
-{% endfor %}
-
-{% if security_enabled -%}
-authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
-jaasLoginRenew=3600000
-kerberos.removeHostFromPrincipal=true
-kerberos.removeRealmFromPrincipal=true
-{% endif %}
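
The server.N entries above come from Jinja2's 1-based loop.index; 2888 is ZooKeeper's quorum port and 3888 its leader-election port. A sketch of the same expansion in plain Python (hostnames invented for illustration):

    zookeeper_hosts = ["c6401.ambari.apache.org", "c6402.ambari.apache.org"]

    # enumerate(..., 1) matches the template's 1-based loop.index.
    for i, host in enumerate(zookeeper_hosts, 1):
        print("server.%d=%s:2888:3888" % (i, host))
    # server.1=c6401.ambari.apache.org:2888:3888
    # server.2=c6402.ambari.apache.org:2888:3888

These ids have to agree with each host's myid file; the myid computation sorts the host list, so the template presumably iterates an already-sorted list prepared in params (the sort itself is not visible in this hunk).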

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
deleted file mode 100644
index 38f9721..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=false
-useTicketCache=true;
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
deleted file mode 100644
index c3e9505..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-Server {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="{{zk_keytab_path}}"
-principal="{{zk_principal}}";
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/test/java/org/apache/ambari/server/api/services/KerberosServiceMetaInfoTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/KerberosServiceMetaInfoTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/KerberosServiceMetaInfoTest.java
index 538bbb5..8a4616a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/KerberosServiceMetaInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/KerberosServiceMetaInfoTest.java
@@ -139,9 +139,11 @@ public class KerberosServiceMetaInfoTest {
   @Before
   public void before() throws Exception {
     File stackRoot = new File("src/main/resources/stacks");
+    File commonServicesRoot = new File("src/main/resources/common-services");
     LOG.info("Stacks file " + stackRoot.getAbsolutePath());
+    LOG.info("Common Services file " + commonServicesRoot.getAbsolutePath());
 
-    AmbariMetaInfo metaInfo = createAmbariMetaInfo(stackRoot, new File("target/version"), true);
+    AmbariMetaInfo metaInfo = createAmbariMetaInfo(stackRoot, commonServicesRoot, new File("target/version"), true);
     metaInfo.init();
 
     serviceInfo = metaInfo.getService("HDP", "2.2", "KERBEROS");
@@ -178,8 +180,8 @@ public class KerberosServiceMetaInfoTest {
     });
   }
 
-  private TestAmbariMetaInfo createAmbariMetaInfo(File stackRoot, File versionFile, boolean replayMocks) throws Exception {
-    TestAmbariMetaInfo metaInfo = new TestAmbariMetaInfo(stackRoot, versionFile);
+  private TestAmbariMetaInfo createAmbariMetaInfo(File stackRoot, File commonServicesRoot, File versionFile, boolean replayMocks) throws Exception {
+    TestAmbariMetaInfo metaInfo = new TestAmbariMetaInfo(stackRoot, commonServicesRoot, versionFile);
     if (replayMocks) {
       metaInfo.replayAllMocks();
 
@@ -216,8 +218,8 @@ public class KerberosServiceMetaInfoTest {
     AlertDefinitionFactory alertDefinitionFactory;
     OsFamily osFamily;
 
-    public TestAmbariMetaInfo(File stackRoot, File serverVersionFile) throws Exception {
-      super(stackRoot, null, serverVersionFile);
+    public TestAmbariMetaInfo(File stackRoot, File commonServicesRoot, File serverVersionFile) throws Exception {
+      super(stackRoot, commonServicesRoot, serverVersionFile);
       // MetainfoDAO
       metaInfoDAO = createNiceMock(MetainfoDAO.class);
       Class<?> c = getClass().getSuperclass();

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
index 3bbf1e5..61fabcb 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
@@ -23,21 +23,27 @@ from mock.mock import MagicMock, patch
 from resource_management.core.exceptions import Fail
 
 class TestDatanode(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "HDFS/2.1.0.2.0/package"
+  STACK_VERSION = "2.0.6"
 
   def test_configure_default(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/datanode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                        classname = "DataNode",
                        command = "configure",
-                       config_file="default.json"
+                       config_file = "default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
     self.assertNoMoreResources()
 
   def test_start_default(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/datanode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                        classname = "DataNode",
                        command = "start",
-                       config_file="default.json"
+                       config_file = "default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
     self.assertResourceCalled('Directory', '/var/run/hadoop',
@@ -65,10 +71,12 @@ class TestDatanode(RMFTestCase):
 
   @patch("os.path.exists", new = MagicMock(return_value=False))
   def test_stop_default(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/datanode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                        classname = "DataNode",
                        command = "stop",
-                       config_file="default.json"
+                       config_file = "default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/var/run/hadoop',
                               owner = 'hdfs',
@@ -97,19 +105,23 @@ class TestDatanode(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_configure_secured(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/datanode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                        classname = "DataNode",
                        command = "configure",
-                       config_file="secured.json"
+                       config_file = "secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
     self.assertNoMoreResources()
 
   def test_start_secured(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/datanode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                        classname = "DataNode",
                        command = "start",
-                       config_file="secured.json"
+                       config_file = "secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
     self.assertResourceCalled('Directory', '/var/run/hadoop',
@@ -142,10 +154,12 @@ class TestDatanode(RMFTestCase):
 
     secured_json['hostLevelParams']['stack_version']= '2.2'
 
-    self.executeScript("2.0.6/services/HDFS/package/scripts/datanode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                        classname = "DataNode",
                        command = "start",
-                       config_dict = secured_json
+                       config_dict = secured_json,
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
     self.assertResourceCalled('Directory', '/var/run/hadoop',
@@ -181,10 +195,12 @@ class TestDatanode(RMFTestCase):
     secured_json['configurations']['hdfs-site']['dfs.datanode.address']= '0.0.0.0:10000'
     secured_json['configurations']['hdfs-site']['dfs.datanode.https.address']= '0.0.0.0:50000'
 
-    self.executeScript("2.0.6/services/HDFS/package/scripts/datanode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                        classname = "DataNode",
                        command = "start",
-                       config_dict = secured_json
+                       config_dict = secured_json,
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
     self.assertResourceCalled('Directory', '/var/run/hadoop',
@@ -212,10 +228,12 @@ class TestDatanode(RMFTestCase):
 
   @patch("os.path.exists", new = MagicMock(return_value=False))
   def test_stop_secured(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/datanode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                        classname = "DataNode",
                        command = "stop",
-                       config_file="secured.json"
+                       config_file = "secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/var/run/hadoop',
                               owner = 'hdfs',
@@ -252,10 +270,12 @@ class TestDatanode(RMFTestCase):
 
     secured_json['hostLevelParams']['stack_version']= '2.2'
 
-    self.executeScript("2.0.6/services/HDFS/package/scripts/datanode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                        classname = "DataNode",
                        command = "stop",
-                       config_dict = secured_json
+                       config_dict = secured_json,
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/var/run/hadoop',
                               owner = 'hdfs',
@@ -294,10 +314,12 @@ class TestDatanode(RMFTestCase):
     secured_json['configurations']['hdfs-site']['dfs.datanode.address']= '0.0.0.0:10000'
     secured_json['configurations']['hdfs-site']['dfs.datanode.https.address']= '0.0.0.0:50000'
 
-    self.executeScript("2.0.6/services/HDFS/package/scripts/datanode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
                        classname = "DataNode",
                        command = "stop",
-                       config_dict = secured_json
+                       config_dict = secured_json,
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/var/run/hadoop',
                               owner = 'hdfs',
@@ -447,8 +469,13 @@ class TestDatanode(RMFTestCase):
     process.returncode = 0
     process_mock.return_value = process
 
-    self.executeScript("2.0.6/services/HDFS/package/scripts/datanode.py",
-      classname = "DataNode", command = "post_rolling_restart", config_file="default.json",  )
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
+                       classname = "DataNode",
+                       command = "post_rolling_restart",
+                       config_file = "default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
+    )
 
     self.assertTrue(process_mock.called)
     self.assertEqual(process_mock.call_count,1)
@@ -463,8 +490,13 @@ class TestDatanode(RMFTestCase):
     process_mock.return_value = process
 
     try:
-      self.executeScript("2.0.6/services/HDFS/package/scripts/datanode.py",
-        classname = "DataNode", command = "post_rolling_restart", config_file="default.json",  )
+      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
+                         classname = "DataNode",
+                         command = "post_rolling_restart",
+                         config_file = "default.json",
+                         hdp_stack_version = self.STACK_VERSION,
+                         target = RMFTestCase.TARGET_COMMON_SERVICES
+      )
       self.fail('Missing DataNode should have caused a failure')
     except Fail,fail:
       self.assertTrue(process_mock.called)
@@ -480,8 +512,13 @@ class TestDatanode(RMFTestCase):
     process_mock.return_value = process
 
     try:
-      self.executeScript("2.0.6/services/HDFS/package/scripts/datanode.py",
-        classname = "DataNode", command = "post_rolling_restart", config_file="default.json",  )
+      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/datanode.py",
+                         classname = "DataNode",
+                         command = "post_rolling_restart",
+                         config_file = "default.json",
+                         hdp_stack_version = self.STACK_VERSION,
+                         target = RMFTestCase.TARGET_COMMON_SERVICES
+      )
       self.fail('Invalid return code should cause a failure')
     except Fail,fail:
       self.assertTrue(process_mock.called)
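
Every test in this file is migrated the same way: the script path is rooted at the refactored common-services package instead of the per-stack copy, and each call pins hdp_stack_version and target. A simplified sketch of the path change (the helper below is hypothetical; RMFTestCase's actual resolution under TARGET_COMMON_SERVICES is not shown in this patch):

    COMMON_SERVICES_PACKAGE_DIR = "HDFS/2.1.0.2.0/package"

    def resolve_script(package_dir, script_name):
        # Old tests hard-coded "2.0.6/services/HDFS/package/scripts/...";
        # the new layout reuses one versioned package across stacks.
        return "%s/scripts/%s" % (package_dir, script_name)

    print(resolve_script(COMMON_SERVICES_PACKAGE_DIR, "datanode.py"))
    # -> HDFS/2.1.0.2.0/package/scripts/datanode.py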

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
index 7c9e42b..be0fac1 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
@@ -30,12 +30,16 @@ from stacks.utils.RMFTestCase import *
 @patch.object(contextlib,"closing", new = MagicMock())
 @patch("os.path.exists", new = MagicMock(return_value=True))
 class Test(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "HDFS/2.1.0.2.0/package"
+  STACK_VERSION = "2.0.6"
 
   def test_generate_configs_default(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/hdfs_client.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hdfs_client.py",
                        classname = "HdfsClient",
                        command = "generate_configs",
-                       config_file="default.json"
+                       config_file = "default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/tmp',
                               recursive = True,

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
index 3fdd412..96a703e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
@@ -22,21 +22,27 @@ from ambari_commons import OSCheck
 from mock.mock import MagicMock, patch
 
 class TestJournalnode(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "HDFS/2.1.0.2.0/package"
+  STACK_VERSION = "2.0.6"
 
   def test_configure_default(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/journalnode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/journalnode.py",
                        classname = "JournalNode",
                        command = "configure",
-                       config_file="default.json"
+                       config_file = "default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
     self.assertNoMoreResources()
 
   def test_start_default(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/journalnode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/journalnode.py",
                        classname = "JournalNode",
                        command = "start",
-                       config_file="default.json"
+                       config_file = "default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
     self.assertResourceCalled('Directory', '/var/run/hadoop',
@@ -63,10 +69,12 @@ class TestJournalnode(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_stop_default(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/journalnode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/journalnode.py",
                        classname = "JournalNode",
                        command = "stop",
-                       config_file="default.json"
+                       config_file = "default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
                               owner = 'hdfs',
@@ -90,19 +98,23 @@ class TestJournalnode(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_configure_secured(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/journalnode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/journalnode.py",
                        classname = "JournalNode",
                        command = "configure",
-                       config_file="secured.json"
+                       config_file = "secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
     self.assertNoMoreResources()
 
   def test_start_secured(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/journalnode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/journalnode.py",
                        classname = "JournalNode",
                        command = "start",
-                       config_file="secured.json"
+                       config_file = "secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
     self.assertResourceCalled('Directory', '/var/run/hadoop',
@@ -129,10 +141,12 @@ class TestJournalnode(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_stop_secured(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/journalnode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/journalnode.py",
                        classname = "JournalNode",
                        command = "stop",
-                       config_file="secured.json"
+                       config_file = "secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/var/run/hadoop/hdfs',
                               owner = 'hdfs',

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
index 3986e68..9c65210 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -28,21 +28,27 @@ import subprocess
 
 @patch.object(shell, "call", new=MagicMock(return_value=(1,"")))
 class TestNamenode(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "HDFS/2.1.0.2.0/package"
+  STACK_VERSION = "2.0.6"
 
   def test_configure_default(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
                        classname = "NameNode",
                        command = "configure",
-                       config_file="default.json"
+                       config_file = "default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
     self.assertNoMoreResources()
 
   def test_start_default(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
                        classname = "NameNode",
                        command = "start",
-                       config_file="default.json"
+                       config_file = "default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
     self.assertResourceCalled('File', '/tmp/checkForFormat.sh',
@@ -128,10 +134,12 @@ class TestNamenode(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_stop_default(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
                        classname = "NameNode",
                        command = "stop",
-                       config_file="default.json"
+                       config_file = "default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid',
                               action = ['delete'],
@@ -147,19 +155,23 @@ class TestNamenode(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_configure_secured(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
                        classname = "NameNode",
                        command = "configure",
-                       config_file="secured.json"
+                       config_file = "secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
     self.assertNoMoreResources()
 
   def test_start_secured(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
                        classname = "NameNode",
                        command = "start",
-                       config_file="secured.json"
+                       config_file = "secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
     self.assertResourceCalled('File', '/tmp/checkForFormat.sh',
@@ -248,10 +260,12 @@ class TestNamenode(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_stop_secured(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
                        classname = "NameNode",
                        command = "stop",
-                       config_file="secured.json"
+                       config_file = "secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid',
                               action = ['delete'],
@@ -267,10 +281,12 @@ class TestNamenode(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_start_ha_default(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
                        classname = "NameNode",
                        command = "start",
-                       config_file="ha_default.json"
+                       config_file = "ha_default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
     self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
@@ -341,10 +357,12 @@ class TestNamenode(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_start_ha_secured(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
                        classname = "NameNode",
                        command = "start",
-                       config_file="ha_secured.json"
+                       config_file = "ha_secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
     self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
@@ -418,10 +436,12 @@ class TestNamenode(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_decommission_default(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
                        classname = "NameNode",
                        command = "decommission",
-                       config_file="default.json"
+                       config_file = "default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
                               owner = 'hdfs',
@@ -437,10 +457,12 @@ class TestNamenode(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_decommission_update_exclude_file_only(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
                        classname = "NameNode",
                        command = "decommission",
-                       config_file="default_update_exclude_file_only.json"
+                       config_file = "default_update_exclude_file_only.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
                               owner = 'hdfs',
@@ -451,10 +473,12 @@ class TestNamenode(RMFTestCase):
 
 
   def test_decommission_ha_default(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
                        classname = "NameNode",
                        command = "decommission",
-                       config_file="ha_default.json"
+                       config_file = "ha_default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
                               owner = 'hdfs',
@@ -462,19 +486,21 @@ class TestNamenode(RMFTestCase):
                               group = 'hadoop',
                               )
     self.assertResourceCalled('Execute', '', user = 'hdfs')
-    self.assertResourceCalled('ExecuteHadoop', 'dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -refreshNodes', 
-                              user = 'hdfs', 
+    self.assertResourceCalled('ExecuteHadoop', 'dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -refreshNodes',
+                              user = 'hdfs',
                               conf_dir = '/etc/hadoop/conf',
                               bin_dir = '/usr/bin',
                               kinit_override = True)
-    self.assertNoMoreResources()    
+    self.assertNoMoreResources()
 
 
   def test_decommission_secured(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
                        classname = "NameNode",
                        command = "decommission",
-                       config_file="secured.json"
+                       config_file = "secured.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
         owner = 'hdfs',
@@ -577,10 +603,12 @@ class TestNamenode(RMFTestCase):
       ll = subprocess.Popen()
       self.assertTrue(isinstance(ll.stdout.readline(),str))
       try:
-        self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+        self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
                            classname = "NameNode",
                            command = "rebalancehdfs",
-                           config_file="rebalancehdfs_default.json"
+                           config_file = "rebalancehdfs_default.json",
+                           hdp_stack_version = self.STACK_VERSION,
+                           target = RMFTestCase.TARGET_COMMON_SERVICES
         )
         self.fail("Exception was not thrown")
       except  resource_management.core.exceptions.Fail:
@@ -590,10 +618,12 @@ class TestNamenode(RMFTestCase):
       Popen_Mock.return_value = 0
       ll = subprocess.Popen()
       self.assertTrue(isinstance(ll.stdout.readline(),str))
-      self.executeScript("2.0.6/services/HDFS/package/scripts/namenode.py",
+      self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
                          classname = "NameNode",
                          command = "rebalancehdfs",
-                         config_file="rebalancehdfs_default.json"
+                         config_file = "rebalancehdfs_default.json",
+                         hdp_stack_version = self.STACK_VERSION,
+                         target = RMFTestCase.TARGET_COMMON_SERVICES
       )
       self.assertEqual(pso.call_count, 2, "Output was not parsed properly")
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/17b71553/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py
index 3ef2b17..d31f366 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py
@@ -23,26 +23,33 @@ from mock.mock import MagicMock, call, patch
 
 @patch.object(resource_management.libraries.functions, "get_unique_id_and_date", new = MagicMock(return_value=''))
 class TestServiceCheck(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "HDFS/2.1.0.2.0/package"
+  STACK_VERSION = "2.0.6"
+
   def test_service_check_default(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/service_check.py",
-                        classname="HdfsServiceCheck",
-                        command="service_check",
-                        config_file="default.json"
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
+                       classname = "HdfsServiceCheck",
+                       command = "service_check",
+                       config_file = "default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
-    
+
     self.assert_service_check()
     self.assertNoMoreResources()
-    
+
   def test_service_check_secured(self):
-    self.executeScript("2.0.6/services/HDFS/package/scripts/service_check.py",
-                        classname="HdfsServiceCheck",
-                        command="service_check",
-                        config_file="default.json"
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/service_check.py",
+                       classname = "HdfsServiceCheck",
+                       command = "service_check",
+                       config_file = "default.json",
+                       hdp_stack_version = self.STACK_VERSION,
+                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
-    
+
     self.assert_service_check()
     self.assertNoMoreResources()
-        
+
   def assert_service_check(self):
     self.assertResourceCalled('ExecuteHadoop', 'dfsadmin -safemode get | grep OFF',
         logoutput = True,


[29/37] ambari git commit: AMBARI-8746: Common Services: Refactor HDP 2.0.6 HIVE, OOZIE services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-exec-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-exec-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-exec-log4j.xml
deleted file mode 100644
index fb852f7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-exec-log4j.xml
+++ /dev/null
@@ -1,111 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>content</name>
-    <description>Custom hive-exec-log4j</description>
-    <value>
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-
-hive.log.threshold=ALL
-hive.root.logger=INFO,FA
-hive.log.dir=${java.io.tmpdir}/${user.name}
-hive.query.id=hadoop
-hive.log.file=${hive.query.id}.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hive.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=${hive.log.threshold}
-
-#
-# File Appender
-#
-
-log4j.appender.FA=org.apache.log4j.FileAppender
-log4j.appender.FA.File=${hive.log.dir}/${hive.log.file}
-log4j.appender.FA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-log4j.appender.FA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#custom logging levels
-#log4j.logger.xxx=DEBUG
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter
-
-
-log4j.category.DataNucleus=ERROR,FA
-log4j.category.Datastore=ERROR,FA
-log4j.category.Datastore.Schema=ERROR,FA
-log4j.category.JPOX.Datastore=ERROR,FA
-log4j.category.JPOX.Plugin=ERROR,FA
-log4j.category.JPOX.MetaData=ERROR,FA
-log4j.category.JPOX.Query=ERROR,FA
-log4j.category.JPOX.General=ERROR,FA
-log4j.category.JPOX.Enhancer=ERROR,FA
-
-
-# Silence useless ZK logs
-log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,FA
-log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,FA
-
-    </value>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-log4j.xml
deleted file mode 100644
index a978ef7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-log4j.xml
+++ /dev/null
@@ -1,120 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-
-  <property>
-    <name>content</name>
-    <description>Custom log4j.properties</description>
-    <value>
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-hive.log.threshold=ALL
-hive.root.logger=INFO,DRFA
-hive.log.dir=${java.io.tmpdir}/${user.name}
-hive.log.file=hive.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hive.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshold=${hive.log.threshold}
-
-#
-# Daily Rolling File Appender
-#
-# Use the PidDailyerRollingFileAppend class instead if you want to use separate log files
-# for different CLI session.
-#
-# log4j.appender.DRFA=org.apache.hadoop.hive.ql.log.PidDailyRollingFileAppender
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-
-log4j.appender.DRFA.File=${hive.log.dir}/${hive.log.file}
-
-# Rollver at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n
-log4j.appender.console.encoding=UTF-8
-
-#custom logging levels
-#log4j.logger.xxx=DEBUG
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter
-
-
-log4j.category.DataNucleus=ERROR,DRFA
-log4j.category.Datastore=ERROR,DRFA
-log4j.category.Datastore.Schema=ERROR,DRFA
-log4j.category.JPOX.Datastore=ERROR,DRFA
-log4j.category.JPOX.Plugin=ERROR,DRFA
-log4j.category.JPOX.MetaData=ERROR,DRFA
-log4j.category.JPOX.Query=ERROR,DRFA
-log4j.category.JPOX.General=ERROR,DRFA
-log4j.category.JPOX.Enhancer=ERROR,DRFA
-
-
-# Silence useless ZK logs
-log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,DRFA
-log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,DRFA
-    </value>
-  </property>
-
-</configuration>
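
For reference, the hive-log4j "content" blob above is not read by Hive directly; the service scripts render it into hive-log4j.properties at configure time, inside a script's Environment. A minimal sketch of that step, assuming Ambari's resource_management API; the conf dir, owner, and group here are illustrative, not taken from this patch:

import os

from resource_management.core.resources.system import File
from resource_management.core.source import InlineTemplate


def write_hive_log4j(config, conf_dir='/etc/hive/conf',
                     owner='hive', group='hadoop'):
  # 'content' carries the raw log4j.properties text shown in the XML above.
  log4j_props = config['configurations']['hive-log4j']['content']
  # InlineTemplate resolves any {{...}} placeholders before the file is written.
  File(os.path.join(conf_dir, 'hive-log4j.properties'),
       mode=0644,
       owner=owner,
       group=group,
       content=InlineTemplate(log4j_props))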

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml
deleted file mode 100644
index 67be680..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml
+++ /dev/null
@@ -1,329 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<configuration supports_final="true">
-
-  <property>
-    <name>hive.heapsize</name>
-    <value>1024</value>
-    <description>Hive Java heap size</description>
-  </property>
-
-  <property>
-    <name>ambari.hive.db.schema.name</name>
-    <value>hive</value>
-    <description>Database name used for the Hive Metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value>jdbc:mysql://localhost/hive?createDatabaseIfNotExist=true</value>
-    <description>JDBC connect string for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionDriverName</name>
-    <value>com.mysql.jdbc.Driver</value>
-    <description>Driver class name for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value>hive</value>
-    <description>username to use against metastore database</description>
-  </property>
-
-  <property require-input="true">
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value> </value>
-    <property-type>PASSWORD</property-type>
-    <description>password to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.warehouse.dir</name>
-    <value>/apps/hive/warehouse</value>
-    <description>location of default database for the warehouse</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.sasl.enabled</name>
-    <value>false</value>
-    <description>If true, the metastore thrift interface will be secured with SASL.
-     Clients must authenticate with Kerberos.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.cache.pinobjtypes</name>
-    <value>Table,Database,Type,FieldSchema,Order</value>
-    <description>List of comma separated metastore object types that should be pinned in the cache</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.uris</name>
-    <value>thrift://localhost:9083</value>
-    <description>URI for client to contact metastore server</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.pre.event.listeners</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener</value>
-    <description>Pre-event listener classes to be loaded on the metastore side to run code
-      whenever databases, tables, and partitions are created, altered, or dropped.
-      Set to org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener
-      if metastore-side authorization is desired.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.client.socket.timeout</name>
-    <value>60</value>
-    <description>MetaStore Client socket timeout in seconds</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.execute.setugi</name>
-    <value>true</value>
-    <description>In insecure mode, setting this property to true causes the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. It is a best-effort setting; if the client sets it to true and the server sets it to false, the client setting is ignored.</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.enabled</name>
-    <value>false</value>
-    <description>Enable or disable Hive client authorization.</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
-    <description>The Hive client authorization manager class name.
-    The user-defined authorization class should implement the interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.</description>
-  </property>
-
-  <property>
-    <name>hive.security.metastore.authorization.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
-    <description>The authorization manager class name to be used in the metastore for authorization. The user-defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.security.authenticator.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
-    <description>Hive client authenticator manager class name. The user-defined authenticator class should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.server2.enable.doAs</name>
-    <value>true</value>
-    <description>Impersonate the connected user. By default HiveServer2 performs the query processing as the user who
-      submitted the query. But if the parameter is set to false, the query will run as the user that the hiveserver2
-      process runs as.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.server2.enable.impersonation</name>
-    <description>Enable user impersonation for HiveServer2</description>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.server2.authentication</name>
-    <description>Authentication mode, default NONE. Options are NONE, NOSASL, KERBEROS, LDAP, PAM and CUSTOM</description>
-    <value>NONE</value>
-  </property>
-
-  <property>
-    <name>fs.hdfs.impl.disable.cache</name>
-    <value>true</value>
-    <description>Disable HDFS filesystem cache.</description>
-  </property>
-
-  <property>
-    <name>fs.file.impl.disable.cache</name>
-    <value>true</value>
-    <description>Disable local filesystem cache.</description>
-  </property>
-
-  <property>
-    <name>hive.enforce.bucketing</name>
-    <value>true</value>
-    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.enforce.sorting</name>
-    <value>true</value>
-    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.map.aggr</name>
-    <value>true</value>
-    <description>Whether to use map-side aggregation in Hive Group By queries.</description>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin</name>
-    <value>true</value>
-    <description>If the tables being joined are bucketized on the join columns, and the number of buckets in one table
-      is a multiple of the number of buckets in the other table, the buckets can be joined with each other by setting
-      this parameter as true.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
-    <value>false</value>
-    <description> If the tables being joined are sorted and bucketized on the join columns, and they have the same number
-    of buckets, a sort-merge join can be performed by setting this parameter as true.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>Whether speculative execution for reducers should be turned on.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization that converts a common
-      join into a map join based on the input file size.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join</name>
-    <value>true</value>
-    <description>Whether the join is automatically converted to a sort-merge join if the joined tables pass
-      the criteria for a sort-merge join.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
-    <value>true</value>
-    <description>Required to enable the conversion of an SMB (sort-merge bucket) join to a map-join SMB.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization that converts a common join into a map join based on the input file
-      size. If this parameter is on, and the sum of sizes for n-1 of the tables/partitions of an n-way join is smaller than the
-      specified size, the join is directly converted to a map join (there is no conditional task).
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask.size</name>
-    <value>1000000000</value>
-    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
-      is on, and the sum of sizes for n-1 of the tables/partitions of an n-way join is smaller than this size, the join is directly
-      converted to a map join (there is no conditional task).
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.reducededuplication.min.reducer</name>
-    <value>1</value>
-    <description>Reduce deduplication merges two reduce sinks (RSs) by moving the key/partition/reducer-num of the child RS to the parent RS.
-      If the reducer-num of the child RS is fixed (order by or forced bucketing) and small, this can produce a very slow, single-MR plan.
-      The optimization is disabled if the number of reducers is less than the specified value.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.mapjoin.mapreduce</name>
-    <value>true</value>
-    <description>If hive.auto.convert.join is off, this parameter does not take
-      effect. If it is on, and if there are map-join jobs followed by a map-reduce
-      job (for example, a group by), each map-only job is merged with the following
-      map-reduce job.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.mapjoin.bucket.cache.size</name>
-    <value>10000</value>
-    <description>
-      How many values for each key of the map-joined table should be cached in memory.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.vectorized.execution.enabled</name>
-    <value>false</value>
-    <description>This flag controls the vectorized mode of query execution as documented in HIVE-4160 (as of Hive 0.13.0)
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.reducededuplication</name>
-    <value>true</value>
-    <description>Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.index.filter</name>
-    <value>true</value>
-    <description>
-    Whether to enable automatic use of indexes
-    </description>
-  </property>
-
-  <property>
-    <name>hive.server2.thrift.port</name>
-    <value>10000</value>
-    <description>
-      TCP port number to listen on, default 10000.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.server2.support.dynamic.service.discovery</name>
-    <value>false</value>
-    <description>Whether HiveServer2 supports dynamic service discovery for its
-      clients. To support this, each instance of HiveServer2 currently uses
-      ZooKeeper to register itself, when it is brought up. JDBC/ODBC clients
-      should use the ZooKeeper ensemble: hive.zookeeper.quorum in their
-      connection string.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.server2.zookeeper.namespace</name>
-    <value>hiveserver2</value>
-    <description>The parent node in ZooKeeper used by HiveServer2 when
-      supporting dynamic service discovery.
-    </description>
-  </property>
-  
-</configuration>
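
For reference, hive-site properties like the ones deleted above are written out with the XmlConfig resource rather than a rendered template. A minimal sketch, assuming Ambari's resource_management API; the conf dir and ownership values are illustrative:

from resource_management.libraries.resources.xml_config import XmlConfig


def write_hive_site(config, conf_dir='/etc/hive/conf',
                    owner='hive', group='hadoop'):
  # Serializes the hive-site config type back into <property> elements like
  # the ones above; supports_final drives the per-property final attributes.
  XmlConfig('hive-site.xml',
            conf_dir=conf_dir,
            configurations=config['configurations']['hive-site'],
            configuration_attributes=config.get('configuration_attributes', {}).get('hive-site', {}),
            owner=owner,
            group=group,
            mode=0644)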

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/webhcat-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/webhcat-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/webhcat-env.xml
deleted file mode 100644
index 14a473f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/webhcat-env.xml
+++ /dev/null
@@ -1,54 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <!-- webhcat-env.sh -->
-  <property>
-    <name>content</name>
-    <description>webhcat-env.sh content</description>
-    <value>
-# The file containing the running pid
-PID_FILE={{webhcat_pid_file}}
-
-TEMPLETON_LOG_DIR={{templeton_log_dir}}/
-
-
-WEBHCAT_LOG_DIR={{templeton_log_dir}}/
-
-# The console error log
-ERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log
-
-# The console log
-CONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log
-
-#TEMPLETON_JAR=templeton_jar_name
-
-#HADOOP_PREFIX=hadoop_prefix
-
-#HCAT_PREFIX=hive_prefix
-
-# Set HADOOP_HOME to point to a specific hadoop install directory
-export HADOOP_HOME={{hadoop_home}}
-    </value>
-  </property>
-  
-</configuration>
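
As with hive-log4j above, the webhcat-env "content" is a template: the {{...}} placeholders are Jinja2 variables resolved from the script's params scope when webhcat-env.sh is written. A minimal sketch, with illustrative paths and users:

import os

from resource_management.core.resources.system import File
from resource_management.core.source import InlineTemplate


def write_webhcat_env(config, conf_dir='/etc/hcatalog/conf',
                      owner='hcat', group='hadoop'):
  env_content = config['configurations']['webhcat-env']['content']
  # InlineTemplate substitutes {{webhcat_pid_file}}, {{templeton_log_dir}},
  # {{hadoop_home}}, etc. from the enclosing params module.
  File(os.path.join(conf_dir, 'webhcat-env.sh'),
       mode=0755,
       owner=owner,
       group=group,
       content=InlineTemplate(env_content))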

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/webhcat-site.xml
deleted file mode 100644
index 335048e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/webhcat-site.xml
+++ /dev/null
@@ -1,135 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- 
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<!-- The default settings for Templeton. -->
-<!-- Edit templeton-site.xml to change settings for your local -->
-<!-- install. -->
-
-<configuration supports_final="true">
-
-  <property>
-    <name>templeton.port</name>
-    <value>50111</value>
-    <description>The HTTP port for the main server.</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop.conf.dir</name>
-    <value>/etc/hadoop/conf</value>
-    <description>The path to the Hadoop configuration.</description>
-  </property>
-
-  <property>
-    <name>templeton.jar</name>
-    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
-    <description>The path to the Templeton jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.libjars</name>
-    <value>/usr/lib/zookeeper/zookeeper.jar</value>
-    <description>Jars to add to the classpath.</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop</name>
-    <value>/usr/bin/hadoop</value>
-    <description>The path to the Hadoop executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.archive</name>
-    <value>hdfs:///apps/webhcat/pig.tar.gz</value>
-    <description>The path to the Pig archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.path</name>
-    <value>pig.tar.gz/pig/bin/pig</value>
-    <description>The path to the Pig executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hcat</name>
-    <value>/usr/bin/hcat</value>
-    <description>The path to the HCatalog executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.archive</name>
-    <value>hdfs:///apps/webhcat/hive.tar.gz</value>
-    <description>The path to the Hive archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.home</name>
-    <value>hive.tar.gz/hive</value>
-    <description>The path to the Hive home within the tar. Has no effect if templeton.hive.archive is not set.</description>
-  </property>
-
-  <property>
-    <name>templeton.hcat.home</name>
-    <value>hive.tar.gz/hive/hcatalog</value>
-    <description>The path to the HCat home within the tar. Has no effect if templeton.hive.archive is not set.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.path</name>
-    <value>hive.tar.gz/hive/bin/hive</value>
-    <description>The path to the Hive executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.properties</name>
-    <value>hive.metastore.local=false, hive.metastore.uris=thrift://localhost:9933, hive.metastore.sasl.enabled=false</value>
-    <description>Properties to set when running hive.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.zookeeper.hosts</name>
-    <value>localhost:2181</value>
-    <description>ZooKeeper servers, as comma-separated host:port pairs.</description>
-  </property>
-
-  <property>
-    <name>templeton.storage.class</name>
-    <value>org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage</value>
-    <description>The class to use as storage</description>
-  </property>
-
-  <property>
-    <name>templeton.override.enabled</name>
-    <value>false</value>
-    <description>Enable the override path in templeton.override.jars</description>
-  </property>
-
-  <property>
-    <name>templeton.streaming.jar</name>
-    <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
-    <description>The hdfs path to the Hadoop streaming jar file.</description>
-  </property> 
-
-  <property>
-    <name>templeton.exec.timeout</name>
-    <value>60000</value>
-    <description>Timeout for the Templeton API, in milliseconds.</description>
-  </property>
-
-</configuration>
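
The localhost:2181 value for templeton.zookeeper.hosts above is only a stack default; at deploy time it is replaced with the full ZooKeeper ensemble. A small illustrative helper (the function name is hypothetical) showing the expected comma-separated host:port format:

def zookeeper_connect_string(hosts, port=2181):
  """Builds the value expected by templeton.zookeeper.hosts."""
  return ','.join('%s:%d' % (host, port) for host in sorted(hosts))

# Example:
#   zookeeper_connect_string(['zk1.example.com', 'zk2.example.com'])
#   -> 'zk1.example.com:2181,zk2.example.com:2181'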

http://git-wip-us.apache.org/repos/asf/ambari/blob/46e9bcb8/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/etc/hive-schema-0.12.0.mysql.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/etc/hive-schema-0.12.0.mysql.sql b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/etc/hive-schema-0.12.0.mysql.sql
deleted file mode 100644
index b0415b1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/etc/hive-schema-0.12.0.mysql.sql
+++ /dev/null
@@ -1,777 +0,0 @@
--- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
---
--- Host: localhost    Database: test
--- ------------------------------------------------------
--- Server version	5.5.25
-
-/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
-/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
-/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
-/*!40101 SET NAMES utf8 */;
-/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
-/*!40103 SET TIME_ZONE='+00:00' */;
-/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
-/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
-/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
-/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
-
---
--- Table structure for table `BUCKETING_COLS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
-  KEY `BUCKETING_COLS_N49` (`SD_ID`),
-  CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `CDS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `CDS` (
-  `CD_ID` bigint(20) NOT NULL,
-  PRIMARY KEY (`CD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `COLUMNS_V2`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
-  `CD_ID` bigint(20) NOT NULL,
-  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `TYPE_NAME` varchar(4000) DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
-  KEY `COLUMNS_V2_N49` (`CD_ID`),
-  CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `DATABASE_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
-  `DB_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
-  KEY `DATABASE_PARAMS_N49` (`DB_ID`),
-  CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `DBS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `DBS` (
-  `DB_ID` bigint(20) NOT NULL,
-  `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`DB_ID`),
-  UNIQUE KEY `UNIQUE_DATABASE` (`NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `DB_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
-  `DB_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `DB_ID` bigint(20) DEFAULT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`DB_GRANT_ID`),
-  UNIQUE KEY `DBPRIVILEGEINDEX` (`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `DB_PRIVS_N49` (`DB_ID`),
-  CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `GLOBAL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
-  `USER_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`USER_GRANT_ID`),
-  UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `IDXS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `IDXS` (
-  `INDEX_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `DEFERRED_REBUILD` bit(1) NOT NULL,
-  `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INDEX_TBL_ID` bigint(20) DEFAULT NULL,
-  `LAST_ACCESS_TIME` int(11) NOT NULL,
-  `ORIG_TBL_ID` bigint(20) DEFAULT NULL,
-  `SD_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`INDEX_ID`),
-  UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`),
-  KEY `IDXS_N51` (`SD_ID`),
-  KEY `IDXS_N50` (`INDEX_TBL_ID`),
-  KEY `IDXS_N49` (`ORIG_TBL_ID`),
-  CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
-  CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
-  CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `INDEX_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` (
-  `INDEX_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`),
-  KEY `INDEX_PARAMS_N49` (`INDEX_ID`),
-  CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `NUCLEUS_TABLES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` (
-  `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`CLASS_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITIONS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITIONS` (
-  `PART_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `LAST_ACCESS_TIME` int(11) NOT NULL,
-  `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `SD_ID` bigint(20) DEFAULT NULL,
-  `TBL_ID` bigint(20) DEFAULT NULL,
-  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`PART_ID`),
-  UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
-  KEY `PARTITIONS_N49` (`TBL_ID`),
-  KEY `PARTITIONS_N50` (`SD_ID`),
-  KEY `PARTITIONS_N51` (`LINK_TARGET_ID`),
-  CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
-  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
-  CONSTRAINT `PARTITIONS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_EVENTS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` (
-  `PART_NAME_ID` bigint(20) NOT NULL,
-  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `EVENT_TIME` bigint(20) NOT NULL,
-  `EVENT_TYPE` int(11) NOT NULL,
-  `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_NAME_ID`),
-  KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_KEYS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` (
-  `TBL_ID` bigint(20) NOT NULL,
-  `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`TBL_ID`,`PKEY_NAME`),
-  KEY `PARTITION_KEYS_N49` (`TBL_ID`),
-  CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_KEY_VALS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` (
-  `PART_ID` bigint(20) NOT NULL,
-  `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`PART_ID`,`INTEGER_IDX`),
-  KEY `PARTITION_KEY_VALS_N49` (`PART_ID`),
-  CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PARTITION_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` (
-  `PART_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_ID`,`PARAM_KEY`),
-  KEY `PARTITION_PARAMS_N49` (`PART_ID`),
-  CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PART_COL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` (
-  `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL,
-  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_ID` bigint(20) DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_COLUMN_GRANT_ID`),
-  KEY `PART_COL_PRIVS_N49` (`PART_ID`),
-  KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `PART_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
-  `PART_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_ID` bigint(20) DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`PART_GRANT_ID`),
-  KEY `PARTPRIVILEGEINDEX` (`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `PART_PRIVS_N49` (`PART_ID`),
-  CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `ROLES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `ROLES` (
-  `ROLE_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`ROLE_ID`),
-  UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `ROLE_MAP`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `ROLE_MAP` (
-  `ROLE_GRANT_ID` bigint(20) NOT NULL,
-  `ADD_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `ROLE_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`ROLE_GRANT_ID`),
-  UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `ROLE_MAP_N49` (`ROLE_ID`),
-  CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SDS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SDS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `CD_ID` bigint(20) DEFAULT NULL,
-  `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `IS_COMPRESSED` bit(1) NOT NULL,
-  `IS_STOREDASSUBDIRECTORIES` bit(1) NOT NULL,
-  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `NUM_BUCKETS` int(11) NOT NULL,
-  `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `SERDE_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`SD_ID`),
-  KEY `SDS_N49` (`SERDE_ID`),
-  KEY `SDS_N50` (`CD_ID`),
-  CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
-  CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SD_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SD_PARAMS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SD_ID`,`PARAM_KEY`),
-  KEY `SD_PARAMS_N49` (`SD_ID`),
-  CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SEQUENCE_TABLE`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` (
-  `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `NEXT_VAL` bigint(20) NOT NULL,
-  PRIMARY KEY (`SEQUENCE_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SERDES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SERDES` (
-  `SERDE_ID` bigint(20) NOT NULL,
-  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SERDE_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SERDE_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` (
-  `SERDE_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`),
-  KEY `SERDE_PARAMS_N49` (`SERDE_ID`),
-  CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_COL_NAMES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` (
-  `SD_ID` bigint(20) NOT NULL,
-  `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
-  KEY `SKEWED_COL_NAMES_N49` (`SD_ID`),
-  CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_COL_VALUE_LOC_MAP`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` (
-  `SD_ID` bigint(20) NOT NULL,
-  `STRING_LIST_ID_KID` bigint(20) NOT NULL,
-  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`),
-  KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`),
-  KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`),
-  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
-  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_STRING_LIST`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` (
-  `STRING_LIST_ID` bigint(20) NOT NULL,
-  PRIMARY KEY (`STRING_LIST_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_STRING_LIST_VALUES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` (
-  `STRING_LIST_ID` bigint(20) NOT NULL,
-  `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`),
-  KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`),
-  CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SKEWED_VALUES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` (
-  `SD_ID_OID` bigint(20) NOT NULL,
-  `STRING_LIST_ID_EID` bigint(20) NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`),
-  KEY `SKEWED_VALUES_N50` (`SD_ID_OID`),
-  KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`),
-  CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
-  CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `SORT_COLS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `SORT_COLS` (
-  `SD_ID` bigint(20) NOT NULL,
-  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `ORDER` int(11) NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
-  KEY `SORT_COLS_N49` (`SD_ID`),
-  CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TABLE_PARAMS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` (
-  `TBL_ID` bigint(20) NOT NULL,
-  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`TBL_ID`,`PARAM_KEY`),
-  KEY `TABLE_PARAMS_N49` (`TBL_ID`),
-  CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TBLS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TBLS` (
-  `TBL_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `DB_ID` bigint(20) DEFAULT NULL,
-  `LAST_ACCESS_TIME` int(11) NOT NULL,
-  `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `RETENTION` int(11) NOT NULL,
-  `SD_ID` bigint(20) DEFAULT NULL,
-  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `VIEW_EXPANDED_TEXT` mediumtext,
-  `VIEW_ORIGINAL_TEXT` mediumtext,
-  `LINK_TARGET_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`TBL_ID`),
-  UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
-  KEY `TBLS_N50` (`SD_ID`),
-  KEY `TBLS_N49` (`DB_ID`),
-  KEY `TBLS_N51` (`LINK_TARGET_ID`),
-  CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
-  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`),
-  CONSTRAINT `TBLS_FK3` FOREIGN KEY (`LINK_TARGET_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TBL_COL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
-  `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL,
-  `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`TBL_COLUMN_GRANT_ID`),
-  KEY `TABLECOLUMNPRIVILEGEINDEX` (`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  KEY `TBL_COL_PRIVS_N49` (`TBL_ID`),
-  CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TBL_PRIVS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TBL_PRIVS` (
-  `TBL_GRANT_ID` bigint(20) NOT NULL,
-  `CREATE_TIME` int(11) NOT NULL,
-  `GRANT_OPTION` smallint(6) NOT NULL,
-  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TBL_ID` bigint(20) DEFAULT NULL,
-  PRIMARY KEY (`TBL_GRANT_ID`),
-  KEY `TBL_PRIVS_N49` (`TBL_ID`),
-  KEY `TABLEPRIVILEGEINDEX` (`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
-  CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TAB_COL_STATS`
---
-CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
- `CS_ID` bigint(20) NOT NULL,
- `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `TBL_ID` bigint(20) NOT NULL,
- `LONG_LOW_VALUE` bigint(20),
- `LONG_HIGH_VALUE` bigint(20),
- `DOUBLE_HIGH_VALUE` double(53,4),
- `DOUBLE_LOW_VALUE` double(53,4),
- `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `NUM_NULLS` bigint(20) NOT NULL,
- `NUM_DISTINCTS` bigint(20),
- `AVG_COL_LEN` double(53,4),
- `MAX_COL_LEN` bigint(20),
- `NUM_TRUES` bigint(20),
- `NUM_FALSES` bigint(20),
- `LAST_ANALYZED` bigint(20) NOT NULL,
-  PRIMARY KEY (`CS_ID`),
-  CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
---
--- Table structure for table `PART_COL_STATS`
---
-CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
- `CS_ID` bigint(20) NOT NULL,
- `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
- `PART_ID` bigint(20) NOT NULL,
- `LONG_LOW_VALUE` bigint(20),
- `LONG_HIGH_VALUE` bigint(20),
- `DOUBLE_HIGH_VALUE` double(53,4),
- `DOUBLE_LOW_VALUE` double(53,4),
- `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
- `NUM_NULLS` bigint(20) NOT NULL,
- `NUM_DISTINCTS` bigint(20),
- `AVG_COL_LEN` double(53,4),
- `MAX_COL_LEN` bigint(20),
- `NUM_TRUES` bigint(20),
- `NUM_FALSES` bigint(20),
- `LAST_ANALYZED` bigint(20) NOT NULL,
-  PRIMARY KEY (`CS_ID`),
-  CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
---
--- Table structure for table `TYPES`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TYPES` (
-  `TYPES_ID` bigint(20) NOT NULL,
-  `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  PRIMARY KEY (`TYPES_ID`),
-  UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Table structure for table `TYPE_FIELDS`
---
-
-/*!40101 SET @saved_cs_client     = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` (
-  `TYPE_NAME` bigint(20) NOT NULL,
-  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
-  `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
-  `INTEGER_IDX` int(11) NOT NULL,
-  PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`),
-  KEY `TYPE_FIELDS_N49` (`TYPE_NAME`),
-  CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
--- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
-CREATE TABLE IF NOT EXISTS `MASTER_KEYS` 
-(
-    `KEY_ID` INTEGER NOT NULL AUTO_INCREMENT,
-    `MASTER_KEY` VARCHAR(767) BINARY NULL,
-    PRIMARY KEY (`KEY_ID`)
-) ENGINE=INNODB DEFAULT CHARSET=latin1;
-
--- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
-CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS`
-(
-    `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL,
-    `TOKEN` VARCHAR(767) BINARY NULL,
-    PRIMARY KEY (`TOKEN_IDENT`)
-) ENGINE=INNODB DEFAULT CHARSET=latin1;
-
---
--- Table structure for VERSION
---
-CREATE TABLE IF NOT EXISTS `VERSION` (
-  `VER_ID` BIGINT NOT NULL,
-  `SCHEMA_VERSION` VARCHAR(127) NOT NULL,
-  `VERSION_COMMENT` VARCHAR(255),
-  PRIMARY KEY (`VER_ID`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1;
-
-INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '0.12.0', 'Hive release version 0.12.0');
-
-/*!40101 SET character_set_client = @saved_cs_client */;
-/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
-
-/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
-/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
-/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
-/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
-/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
-/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
-/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-
--- Dump completed on 2012-08-23  0:56:31
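
The INSERT at the end of the script above stamps the schema with its version, so tools can verify an existing metastore against it. A minimal sketch using the mysql CLI (database name and credentials are illustrative, and a local MySQL with mysql on the PATH is assumed):

import subprocess


def metastore_schema_version(db='hive', user='hive', password='hive'):
  # -N suppresses the column header so only the value is returned.
  query = 'SELECT SCHEMA_VERSION FROM VERSION WHERE VER_ID = 1;'
  out = subprocess.check_output(
      ['mysql', '-N', '-u', user, '-p' + password, db, '-e', query])
  return out.strip()  # e.g. '0.12.0' for the schema above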


[20/37] ambari git commit: AMBARI-8745: Common Services: Refactor HDP2.0.6 FLUME, GANGLIA, HBASE services (Jayush Luniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/files/draining_servers.rb
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/files/draining_servers.rb b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/files/draining_servers.rb
new file mode 100644
index 0000000..5bcb5b6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/files/draining_servers.rb
@@ -0,0 +1,164 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Add, remove, or list servers in draining mode via ZooKeeper
+
+require 'optparse'
+include Java
+
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.client.HBaseAdmin
+import org.apache.hadoop.hbase.zookeeper.ZKUtil
+import org.apache.commons.logging.Log
+import org.apache.commons.logging.LogFactory
+
+# Name of this script
+NAME = "draining_servers"
+
+# Do command-line parsing
+options = {}
+optparse = OptionParser.new do |opts|
+  opts.banner = "Usage: ./hbase org.jruby.Main #{NAME}.rb [options] add|remove|list <hostname>|<host:port>|<servername> ..."
+  opts.separator 'Add, remove, or list servers in draining mode. Accepts either a hostname to drain all region servers ' +
+                 'on that host, a host:port pair, or a host,port,startCode triplet. More than one server can be given, separated by spaces.'
+  opts.on('-h', '--help', 'Display usage information') do
+    puts opts
+    exit
+  end
+  options[:debug] = false
+  opts.on('-d', '--debug', 'Display extra debug logging') do
+    options[:debug] = true
+  end
+end
+optparse.parse!
+
+# Return array of servernames where servername is hostname+port+startcode
+# comma-delimited
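+# e.g. "regionserver-1.example.com,60020,1400000000000" (illustrative host and startcode)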
+def getServers(admin)
+  serverInfos = admin.getClusterStatus().getServerInfo()
+  servers = []
+  for server in serverInfos
+    servers << server.getServerName()
+  end
+  return servers
+end
+
+def getServerNames(hostOrServers, config)
+  ret = []
+  
+  for hostOrServer in hostOrServers
+    # check whether it is already serverName. No need to connect to cluster
+    parts = hostOrServer.split(',')
+    if parts.size() == 3
+      ret << hostOrServer
+    else 
+      admin = HBaseAdmin.new(config) if not admin
+      servers = getServers(admin)
+
+      hostOrServer = hostOrServer.gsub(/:/, ",")
+      for server in servers 
+        ret << server if server.start_with?(hostOrServer)
+      end
+    end
+  end
+  
+  admin.close() if admin
+  return ret
+end
+
+def addServers(options, hostOrServers)
+  config = HBaseConfiguration.create()
+  servers = getServerNames(hostOrServers, config)
+  
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+  
+  begin
+    for server in servers
+      node = ZKUtil.joinZNode(parentZnode, server)
+      ZKUtil.createAndFailSilent(zkw, node)
+    end
+  ensure
+    zkw.close()
+  end
+end
+
+def removeServers(options, hostOrServers)
+  config = HBaseConfiguration.create()
+  servers = getServerNames(hostOrServers, config)
+  
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+  
+  begin
+    for server in servers
+      node = ZKUtil.joinZNode(parentZnode, server)
+      ZKUtil.deleteNodeFailSilent(zkw, node)
+    end
+  ensure
+    zkw.close()
+  end
+end
+
+# list servers in draining mode
+def listServers(options)
+  config = HBaseConfiguration.create()
+  
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+
+  servers = ZKUtil.listChildrenNoWatch(zkw, parentZnode)
+  servers.each {|server| puts server}
+end
+
+hostOrServers = ARGV[1..ARGV.size()]
+
+# Create a logger and disable the annoyingly chatty DEBUG-level client logging
+def configureLogging(options)
+  apacheLogger = LogFactory.getLog(NAME)
+  # Configure log4j to not spew so much
+  unless (options[:debug]) 
+    logger = org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase")
+    logger.setLevel(org.apache.log4j.Level::WARN)
+    logger = org.apache.log4j.Logger.getLogger("org.apache.zookeeper")
+    logger.setLevel(org.apache.log4j.Level::WARN)
+  end
+  return apacheLogger
+end
+
+# Create a logger and save it to ruby global
+$LOG = configureLogging(options)
+case ARGV[0]
+  when 'add'
+    if ARGV.length < 2
+      puts optparse
+      exit 1
+    end
+    addServers(options, hostOrServers)
+  when 'remove'
+    if ARGV.length < 2
+      puts optparse
+      exit 1
+    end
+    removeServers(options, hostOrServers)
+  when 'list'
+    listServers(options)
+  else
+    puts optparse
+    exit 3
+end

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/files/hbaseSmokeVerify.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/files/hbaseSmokeVerify.sh b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/files/hbaseSmokeVerify.sh
new file mode 100644
index 0000000..5c320c0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/files/hbaseSmokeVerify.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+conf_dir=$1
+data=$2
+hbase_cmd=$3
+echo "scan 'ambarismoketest'" | $hbase_cmd --config $conf_dir shell > /tmp/hbase_chk_verify
+cat /tmp/hbase_chk_verify
+echo "Looking for $data"
+grep -q $data /tmp/hbase_chk_verify
+if [ "$?" -ne 0 ]
+then
+  exit 1
+fi
+
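+# The script's exit status is that of this final grep: success only when exactly one row was returned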
+grep -q '1 row(s)' /tmp/hbase_chk_verify

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/__init__.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/__init__.py
new file mode 100644
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/functions.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/functions.py
new file mode 100644
index 0000000..e6e7fb9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/functions.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import re
+import math
+import datetime
+
+from resource_management.core.shell import checked_call
+
+def calc_xmn_from_xms(heapsize_str, xmn_percent, xmn_max):
+  """
+  @param heapsize_str: str (e.g '1000m')
+  @param xmn_percent: float (e.g 0.2)
+  @param xmn_max: integer (e.g 512)
+  """
+  heapsize = int(re.search(r'\d+', heapsize_str).group(0))
+  heapsize_unit = re.search(r'\D+', heapsize_str).group(0)
+  xmn_val = int(math.floor(heapsize*xmn_percent))
+  xmn_val -= xmn_val % 8
+  
+  result_xmn_val = xmn_max if xmn_val > xmn_max else xmn_val
+  return str(result_xmn_val) + heapsize_unit
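+
+# Illustrative walk-through (hypothetical values):
+#   calc_xmn_from_xms('1024m', 0.2, 512) -> floor(1024 * 0.2) = 204
+#   -> rounded down to a multiple of 8 = 200 -> under the 512 cap -> '200m'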

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
new file mode 100644
index 0000000..ea99288
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
@@ -0,0 +1,158 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+
+from resource_management import *
+import sys
+
+def hbase(name=None):  # name is one of 'master', 'regionserver' or 'client'
+  import params
+
+  Directory( params.hbase_conf_dir_prefix,
+      mode=0755
+  )
+
+  Directory( params.hbase_conf_dir,
+      owner = params.hbase_user,
+      group = params.user_group,
+      recursive = True
+  )
+
+  Directory (params.tmp_dir,
+             owner = params.hbase_user,
+             mode=0775,
+             recursive = True,
+             recursive_permission = True
+  )
+
+  Directory (params.local_dir,
+             owner = params.hbase_user,
+             group = params.user_group,
+             mode=0775,
+             recursive = True
+  )
+
+  Directory (os.path.join(params.local_dir, "jars"),
+             owner = params.hbase_user,
+             group = params.user_group,
+             mode=0775,
+             recursive = True
+  )
+
+  XmlConfig( "hbase-site.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['hbase-site'],
+            configuration_attributes=params.config['configuration_attributes']['hbase-site'],
+            owner = params.hbase_user,
+            group = params.user_group
+  )
+
+  if 'hdfs-site' in params.config['configurations']:
+    XmlConfig( "hdfs-site.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['hdfs-site'],
+            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+            owner = params.hbase_user,
+            group = params.user_group
+    )
+
+    XmlConfig("hdfs-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['hdfs-site'],
+            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+            owner=params.hdfs_user,
+            group=params.user_group
+    )
+
+  if 'hbase-policy' in params.config['configurations']:
+    XmlConfig( "hbase-policy.xml",
+            conf_dir = params.hbase_conf_dir,
+            configurations = params.config['configurations']['hbase-policy'],
+            configuration_attributes=params.config['configuration_attributes']['hbase-policy'],
+            owner = params.hbase_user,
+            group = params.user_group
+    )
+  # Manually overriding ownership of file installed by hadoop package
+  else: 
+    File( format("{params.hbase_conf_dir}/hbase-policy.xml"),
+      owner = params.hbase_user,
+      group = params.user_group
+    )
+
+  File(format("{hbase_conf_dir}/hbase-env.sh"),
+       owner = params.hbase_user,
+       content=InlineTemplate(params.hbase_env_sh_template)
+  )     
+       
+  hbase_TemplateConfig( params.metric_prop_file_name,
+    tag = 'GANGLIA-MASTER' if name == 'master' else 'GANGLIA-RS'
+  )
+
+  hbase_TemplateConfig( 'regionservers')
+
+  if params.security_enabled:
+    hbase_TemplateConfig( format("hbase_{name}_jaas.conf"))
+  
+  if name != "client":
+    Directory( params.pid_dir,
+      owner = params.hbase_user,
+      recursive = True
+    )
+  
+    Directory (params.log_dir,
+      owner = params.hbase_user,
+      recursive = True
+    )
+
+  if (params.log4j_props != None):
+    File(format("{params.hbase_conf_dir}/log4j.properties"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.hbase_user,
+         content=params.log4j_props
+    )
+  elif (os.path.exists(format("{params.hbase_conf_dir}/log4j.properties"))):
+    File(format("{params.hbase_conf_dir}/log4j.properties"),
+      mode=0644,
+      group=params.user_group,
+      owner=params.hbase_user
+    )
+  if name in ["master","regionserver"]:
+    params.HdfsDirectory(params.hbase_hdfs_root_dir,
+                         action="create_delayed",
+                         owner=params.hbase_user
+    )
+    params.HdfsDirectory(params.hbase_staging_dir,
+                         action="create_delayed",
+                         owner=params.hbase_user,
+                         mode=0711
+    )
+    params.HdfsDirectory(None, action="create")
+
+def hbase_TemplateConfig(name, tag=None):
+  import params
+
+  TemplateConfig( format("{hbase_conf_dir}/{name}"),
+      owner = params.hbase_user,
+      template_tag = tag
+  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_client.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_client.py
new file mode 100644
index 0000000..043ad11
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_client.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hbase import hbase
+
+         
+class HbaseClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    
+    hbase(name='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+if __name__ == "__main__":
+  HbaseClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_decommission.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_decommission.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_decommission.py
new file mode 100644
index 0000000..a623927
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_decommission.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def hbase_decommission(env):
+  import params
+
+  env.set_params(params)
+  kinit_cmd = params.kinit_cmd
+
+  File(params.region_drainer,
+       content=StaticFile("draining_servers.rb"),
+       mode=0755
+  )
+  
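+  # both host lists are comma-separated strings, e.g. "host1.example.com,host2.example.com" (illustrative)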
+  hosts = []
+  if params.hbase_excluded_hosts and params.hbase_excluded_hosts.split(","):
+    hosts = params.hbase_excluded_hosts.split(",")
+  elif params.hbase_included_hosts and params.hbase_included_hosts.split(","):
+    hosts = params.hbase_included_hosts.split(",")
+
+  if params.hbase_drain_only:
+    for host in hosts:
+      if host:
+        regiondrainer_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} remove {host}")
+        Execute(regiondrainer_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
+
+  else:
+    for host in hosts:
+      if host:
+        regiondrainer_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} add {host}")
+        regionmover_cmd = format(
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_mover} unload {host}")
+
+        Execute(regiondrainer_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )
+
+        Execute(regionmover_cmd,
+                user=params.hbase_user,
+                logoutput=True
+        )

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
new file mode 100644
index 0000000..c0e84b4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hbase import hbase
+from hbase_service import hbase_service
+from hbase_decommission import hbase_decommission
+import upgrade
+
+         
+class HbaseMaster(Script):
+  def install(self, env):
+    self.install_packages(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hbase(name='master')
+    
+  def pre_rolling_restart(self, env):
+    import params
+    env.set_params(params)
+    upgrade.prestart(env, "hbase-master")
+
+  def start(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+
+    hbase_service( 'master',
+      action = 'start'
+    )
+    
+  def stop(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+
+    hbase_service( 'master',
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-{hbase_user}-master.pid")
+    check_process_status(pid_file)
+
+  def decommission(self, env):
+    import params
+    env.set_params(params)
+
+    hbase_decommission(env)
+
+
+if __name__ == "__main__":
+  HbaseMaster().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_regionserver.py
new file mode 100644
index 0000000..ea8e3d4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_regionserver.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hbase import hbase
+from hbase_service import hbase_service
+import upgrade
+
+         
+class HbaseRegionServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hbase(name='regionserver')
+      
+  def pre_rolling_restart(self, env):
+    import params
+    env.set_params(params)
+    upgrade.prestart(env, "hbase-regionserver")
+
+  def post_rolling_restart(self, env):
+    import params
+    env.set_params(params)
+    upgrade.post_regionserver(env)
+
+  def start(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+
+    hbase_service( 'regionserver',
+      action = 'start'
+    )
+
+    
+  def stop(self, env, rolling_restart=False):
+    import params
+    env.set_params(params)
+
+    hbase_service( 'regionserver',
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-{hbase_user}-regionserver.pid")
+    check_process_status(pid_file)
+    
+  def decommission(self, env):
+    print "Decommission not yet implemented!"
+    
+
+if __name__ == "__main__":
+  HbaseRegionServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
new file mode 100644
index 0000000..99cae6a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+def hbase_service(
+  name,
+  action = 'start'): # 'start' or 'stop' or 'status'
+    
+    import params
+  
+    role = name
+    cmd = format("{daemon_script} --config {hbase_conf_dir}")
+    pid_file = format("{pid_dir}/hbase-{hbase_user}-{role}.pid")
+    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
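+    # no_op_test succeeds when the pid file exists and the recorded pid is alive,
+    # so 'start' is skipped if the daemon is already running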
+    
+    if action == 'start':
+      daemon_cmd = format("{cmd} start {role}")
+      
+      Execute ( daemon_cmd,
+        not_if = no_op_test,
+        user = params.hbase_user
+      )
+    elif action == 'stop':
+      daemon_cmd = format("{cmd} stop {role}")
+
+      Execute ( daemon_cmd,
+        user = params.hbase_user,
+        # BUGFIX: hbase regionserver sometimes hangs when nn is in safemode
+        timeout = 30,
+        on_timeout = format("! ( {no_op_test} ) || sudo -H -E kill -9 `cat {pid_file}`"),
+      )
+      
+      Execute (format("rm -f {pid_file}"))

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params.py
new file mode 100644
index 0000000..79d47bf
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params.py
@@ -0,0 +1,147 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from functions import calc_xmn_from_xms
+from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+exec_tmp_dir = Script.get_tmp_dir()
+
+version = default("/commandParams/version", None)
+
+hdp_stack_version = str(config['hostLevelParams']['stack_version'])
+hdp_stack_version = format_hdp_stack_version(hdp_stack_version)
+
+#hadoop params
+if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+  hadoop_bin_dir = format("/usr/hdp/current/hadoop-client/bin")
+  daemon_script = format('/usr/hdp/current/hbase-client/bin/hbase-daemon.sh')
+  region_mover = format('/usr/hdp/current/hbase-client/bin/region_mover.rb')
+  region_drainer = format('/usr/hdp/current/hbase-client/bin/draining_servers.rb')
+  hbase_cmd = format('/usr/hdp/current/hbase-client/bin/hbase')
+else:
+  hadoop_bin_dir = "/usr/bin"
+  daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
+  region_mover = "/usr/lib/hbase/bin/region_mover.rb"
+  region_drainer = "/usr/lib/hbase/bin/draining_servers.rb"
+  hbase_cmd = "/usr/lib/hbase/bin/hbase"
+
+hadoop_conf_dir = "/etc/hadoop/conf"
+hbase_conf_dir_prefix = "/etc/hbase"
+hbase_conf_dir = format("{hbase_conf_dir_prefix}/conf")
+hbase_excluded_hosts = config['commandParams']['excluded_hosts']
+hbase_drain_only = default("/commandParams/mark_draining_only",False)
+hbase_included_hosts = config['commandParams']['included_hosts']
+
+hbase_user = status_params.hbase_user
+hbase_principal_name = config['configurations']['hbase-env']['hbase_principal_name']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+_authentication = config['configurations']['core-site']['hadoop.security.authentication']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+# this is "hadoop-metrics.properties" for 1.x stacks
+metric_prop_file_name = "hadoop-metrics2-hbase.properties"
+
+# not supporting 32 bit jdk.
+java64_home = config['hostLevelParams']['java_home']
+
+log_dir = config['configurations']['hbase-env']['hbase_log_dir']
+master_heapsize = config['configurations']['hbase-env']['hbase_master_heapsize']
+
+regionserver_heapsize = config['configurations']['hbase-env']['hbase_regionserver_heapsize']
+regionserver_xmn_max = config['configurations']['hbase-env']['hbase_regionserver_xmn_max']
+regionserver_xmn_percent = config['configurations']['hbase-env']['hbase_regionserver_xmn_ratio']
+regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, regionserver_xmn_percent, regionserver_xmn_max)
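+# e.g. a "1024m" heap with xmn ratio 0.2 and max 512 yields "200m" (illustrative values)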
+
+pid_dir = status_params.pid_dir
+tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
+# TODO UPGRADE default, update site during upgrade
+_local_dir_conf = default('/configurations/hbase-site/hbase.local.dir', "${hbase.tmp.dir}/local")
+local_dir = substitute_vars(_local_dir_conf, config['configurations']['hbase-site'])
+
+client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
+master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
+regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
+
+ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
+ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]
+
+ams_collector_hosts = default("/clusterHostInfo/metric_collector_hosts", [])
+has_metric_collector = not len(ams_collector_hosts) == 0
+if has_metric_collector:
+  metric_collector_host = ams_collector_hosts[0]
+
+# if HBase is selected, hbase_rs_hosts should not be empty, but default just in case
+if 'slave_hosts' in config['clusterHostInfo']:
+  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/slave_hosts') # if hbase_rs_hosts is not given, region servers are assumed to run on the slave nodes
+else:
+  rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/all_hosts')
+
+smoke_test_user = config['configurations']['cluster-env']['smokeuser']
+smokeuser_permissions = "RWXCA"
+service_check_data = functions.get_unique_id_and_date()
+user_group = config['configurations']['cluster-env']["user_group"]
+
+if security_enabled:
+  _hostname_lowercase = config['hostname'].lower()
+  master_jaas_princ = config['configurations']['hbase-site']['hbase.master.kerberos.principal'].replace('_HOST',_hostname_lowercase)
+  regionserver_jaas_princ = config['configurations']['hbase-site']['hbase.regionserver.kerberos.principal'].replace('_HOST',_hostname_lowercase)
+
+master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
+regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+hbase_user_keytab = config['configurations']['hbase-env']['hbase_user_keytab']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+if security_enabled:
+  kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_principal_name};")
+else:
+  kinit_cmd = ""
+
+#log4j.properties
+if (('hbase-log4j' in config['configurations']) and ('content' in config['configurations']['hbase-log4j'])):
+  log4j_props = config['configurations']['hbase-log4j']['content']
+else:
+  log4j_props = None
+  
+hbase_env_sh_template = config['configurations']['hbase-env']['content']
+
+hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']
+hbase_staging_dir = "/apps/hbase/staging"
+#for create_hdfs_directory
+hostname = config["hostname"]
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+import functools
+#create partial functions with common arguments for every HdfsDirectory call
+#to create hdfs directory we need to call params.HdfsDirectory in code
+HdfsDirectory = functools.partial(
+  HdfsDirectory,
+  conf_dir=hadoop_conf_dir,
+  hdfs_user=hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  bin_dir = hadoop_bin_dir
+)

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/service_check.py
new file mode 100644
index 0000000..15a306b
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/service_check.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import functions
+
+
+class HbaseServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    
+    output_file = "/apps/hbase/data/ambarismoketest"
+    test_cmd = format("fs -test -e {output_file}")
+    smokeuser_kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smoke_test_user};") if params.security_enabled else ""
+    hbase_servicecheck_file = format("{exec_tmp_dir}/hbase-smoke.sh")
+  
+    File( format("{exec_tmp_dir}/hbaseSmokeVerify.sh"),
+      content = StaticFile("hbaseSmokeVerify.sh"),
+      mode = 0755
+    )
+  
+    File( hbase_servicecheck_file,
+      mode = 0755,
+      content = Template('hbase-smoke.sh.j2')
+    )
+    
+    if params.security_enabled:
+      hbase_grant_permissions_file = format("{exec_tmp_dir}/hbase_grant_permissions.sh")
+      grant_privilege_cmd = format("{kinit_cmd} {hbase_cmd} shell {hbase_grant_permissions_file}")
+
+      File( hbase_grant_permissions_file,
+        owner   = params.hbase_user,
+        group   = params.user_group,
+        mode    = 0644,
+        content = Template('hbase_grant_permissions.j2')
+      )
+
+      Execute( grant_privilege_cmd,
+        user = params.hbase_user,
+      )
+
+    servicecheckcmd = format("{smokeuser_kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} shell {hbase_servicecheck_file}")
+    smokeverifycmd = format("{smokeuser_kinit_cmd} {exec_tmp_dir}/hbaseSmokeVerify.sh {hbase_conf_dir} {service_check_data} {hbase_cmd}")
+  
+    Execute( servicecheckcmd,
+      tries     = 3,
+      try_sleep = 5,
+      user = params.smoke_test_user,
+      logoutput = True
+    )
+  
+    Execute ( smokeverifycmd,
+      tries     = 3,
+      try_sleep = 5,
+      user = params.smoke_test_user,
+      logoutput = True
+    )
+    
+if __name__ == "__main__":
+  HbaseServiceCheck().execute()
+  

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/status_params.py
new file mode 100644
index 0000000..850ec8b
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/status_params.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+pid_dir = config['configurations']['hbase-env']['hbase_pid_dir']
+hbase_user = config['configurations']['hbase-env']['hbase_user']

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/upgrade.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/upgrade.py
new file mode 100644
index 0000000..6f2e258
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/upgrade.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import re
+
+from resource_management import *
+from resource_management.core.resources.system import Execute
+from resource_management.core.shell import call
+from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.decorator import retry
+
+def prestart(env, hdp_component):
+  import params
+
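+  # e.g. runs "hdp-select set hbase-master 2.2.0.0-2041" (illustrative component and version)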
+  if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    Execute("hdp-select set {0} {1}".format(hdp_component, params.version))
+
+def post_regionserver(env):
+  import params
+  env.set_params(params)
+
+  check_cmd = "echo 'status \"simple\"' | {0} shell".format(params.hbase_cmd)
+
+  exec_cmd = "{0} {1}".format(params.kinit_cmd, check_cmd)
+  call_and_match(exec_cmd, params.hbase_user, params.hostname.lower() + ":")
+
+
+@retry(times=15, sleep_time=2, err_class=Fail)
+def call_and_match(cmd, user, regex):
+
+  code, out = call(cmd, user=user)
+
+  if not (out and re.search(regex, out)):
+    raise Fail("Could not verify RS available")

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2 b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
new file mode 100644
index 0000000..b07378b
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
@@ -0,0 +1,108 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is an hardcoded-name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+{% if has_metric_collector %}
+
+*.sink.timeline*.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
+hbase.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+hbase.period=10
+hbase.collector={{metric_collector_host}}:8188
+
+jvm.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+jvm.period=10
+jvm.collector={{metric_collector_host}}:8188
+
+rpc.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+rpc.period=10
+rpc.collector={{metric_collector_host}}:8188
+
+hbase.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+hbase.sink.timeline.period=10
+hbase.sink.timeline.collector={{metric_collector_host}}:8188
+
+{% else %}
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers={{ganglia_server_host}}:8663
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers={{ganglia_server_host}}:8663
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers={{ganglia_server_host}}:8663
+
+#Ganglia following hadoop example
+hbase.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+hbase.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+hbase.sink.ganglia.servers={{ganglia_server_host}}:8663
+
+{% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2 b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
new file mode 100644
index 0000000..d13540f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
@@ -0,0 +1,102 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is an hardcoded-name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+{% if has_metric_collector %}
+
+hbase.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+hbase.period=10
+hbase.collector={{metric_collector_host}}:8188
+
+jvm.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+jvm.period=10
+jvm.collector={{metric_collector_host}}:8188
+
+rpc.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+rpc.period=10
+rpc.collector={{metric_collector_host}}:8188
+
+hbase.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+hbase.sink.timeline.period=10
+hbase.sink.timeline.collector={{metric_collector_host}}:8188
+
+{% else %}
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers={{ganglia_server_host}}:8656
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers={{ganglia_server_host}}:8656
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers={{ganglia_server_host}}:8656
+
+#Ganglia following hadoop example
+hbase.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+hbase.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+hbase.sink.ganglia.servers={{ganglia_server_host}}:8656
+
+{% endif %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hbase-smoke.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hbase-smoke.sh.j2 b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hbase-smoke.sh.j2
new file mode 100644
index 0000000..458da95
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hbase-smoke.sh.j2
@@ -0,0 +1,44 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+disable 'ambarismoketest'
+drop 'ambarismoketest'
+create 'ambarismoketest','family'
+put 'ambarismoketest','row01','family:col01','{{service_check_data}}'
+scan 'ambarismoketest'
+exit
\ No newline at end of file
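
The smoke template above is plain HBase shell input: it drops any previous
ambarismoketest table, recreates it, writes one cell carrying the rendered
{{service_check_data}} token, and scans it back. A service check would
typically render this template to a file and feed it to the HBase shell. A
minimal sketch of that flow, assuming hypothetical script and config paths
(this is not the patch's actual service-check code):

    # Run a rendered hbase-smoke script through the HBase shell and confirm
    # the scan output mentions the smoke table.  Paths are assumptions.
    import subprocess

    def run_smoke(script='/tmp/hbase-smoke.sh', conf_dir='/etc/hbase/conf'):
        # 'hbase shell <file>' executes the commands in the file and exits.
        output = subprocess.check_output(
            ['hbase', '--config', conf_dir, 'shell', script])
        if b'ambarismoketest' not in output:
            raise RuntimeError('smoke scan did not show the test table')
        return output

    run_smoke()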

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hbase_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hbase_client_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hbase_client_jaas.conf.j2
new file mode 100644
index 0000000..38f9721
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hbase_client_jaas.conf.j2
@@ -0,0 +1,23 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=false
+useTicketCache=true;
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hbase_grant_permissions.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hbase_grant_permissions.j2 b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hbase_grant_permissions.j2
new file mode 100644
index 0000000..3378983
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hbase_grant_permissions.j2
@@ -0,0 +1,39 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+grant '{{smoke_test_user}}', '{{smokeuser_permissions}}'
+exit
\ No newline at end of file
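
The grant script above is executed the same way as the smoke script sketched
earlier, but on secure clusters it typically has to run first and as a
privileged HBase user (after a kinit with the HBase keytab), so that
{{smoke_test_user}} holds {{smokeuser_permissions}} before the smoke table is
created.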

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hbase_master_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hbase_master_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hbase_master_jaas.conf.j2
new file mode 100644
index 0000000..a93c36c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hbase_master_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{master_keytab_path}}"
+principal="{{master_jaas_princ}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hbase_regionserver_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hbase_regionserver_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hbase_regionserver_jaas.conf.j2
new file mode 100644
index 0000000..7097481
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/hbase_regionserver_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{regionserver_keytab_path}}"
+principal="{{regionserver_jaas_princ}}";
+};
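
The three JAAS templates in this patch differ only in how the Client section
authenticates to ZooKeeper: the client file relies on the caller's Kerberos
ticket cache (useKeyTab=false, useTicketCache=true), while the master and
regionserver files authenticate non-interactively from their service keytabs
(useKeyTab=true, storeKey=true, useTicketCache=false). The rendered files are
typically handed to each JVM through the standard
java.security.auth.login.config system property. A minimal sketch that checks
a rendered daemon JAAS file points at a readable keytab, with a hypothetical
path:

    # Extract keyTab="..." from a rendered JAAS config and verify it is readable.
    import os
    import re

    def keytab_of(jaas_path):
        with open(jaas_path) as f:
            match = re.search(r'keyTab="([^"]+)"', f.read())
        return match.group(1) if match else None

    # Hypothetical rendered location; adjust for the actual cluster layout.
    keytab = keytab_of('/etc/hbase/conf/hbase_master_jaas.conf')
    assert keytab and os.access(keytab, os.R_OK), 'missing or unreadable keytab'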

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/regionservers.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/regionservers.j2 b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/regionservers.j2
new file mode 100644
index 0000000..fc6cc37
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/templates/regionservers.j2
@@ -0,0 +1,20 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in rs_hosts %}{{host}}
+{% endfor %}
\ No newline at end of file
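
The regionservers template above is a bare Jinja loop that emits one
RegionServer hostname per line; the literal newline inside the loop body is
the separator, which is why the two markup lines sit on separate lines. A
minimal sketch of the rendering, using the stock jinja2 library and an assumed
host list rather than the patch's own template machinery:

    # Render the two-line regionservers loop with stock jinja2.
    from jinja2 import Template

    TEMPLATE = "{% for host in rs_hosts %}{{host}}\n{% endfor %}"
    print(Template(TEMPLATE).render(
        rs_hosts=['c6401.ambari.apache.org', 'c6402.ambari.apache.org']))
    # -> c6401.ambari.apache.org
    #    c6402.ambari.apache.org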

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/alerts.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/alerts.json
deleted file mode 100644
index 80244d2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/alerts.json
+++ /dev/null
@@ -1,18 +0,0 @@
-{
-  "FLUME": {
-    "service": [],
-    "FLUME_HANDLER": [
-      {
-        "name": "flume_agent_status",
-        "label": "Flume Agent Status",
-        "description": "This host-level alert is triggerd if any of the expected flume agent processes are not available.",
-        "interval": 1,
-        "scope": "ANY",
-        "source": {
-          "type": "SCRIPT",
-          "path": "HDP/2.0.6/services/FLUME/package/files/alert_flume_agent_status.py"
-        }
-      }
-    ]
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/configuration/flume-conf.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/configuration/flume-conf.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/configuration/flume-conf.xml
deleted file mode 100644
index 8ff764b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/configuration/flume-conf.xml
+++ /dev/null
@@ -1,33 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-  <property>
-    <name>content</name>
-    <description>Flume agent configurations. 
-    Specifying the configuration here applies to all hosts in this configuration-group. Please use host specific configuration-groups to provide different configurations on different hosts. 
-    For configuring multiple agents on the same host, provide the combined agent configurations here. Each agent should have a different name.</description>
-    <value>
-# Flume agent config
-    </value>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/configuration/flume-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/configuration/flume-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/configuration/flume-env.xml
deleted file mode 100644
index bb56f9c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/configuration/flume-env.xml
+++ /dev/null
@@ -1,79 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>flume_conf_dir</name>
-    <value>/etc/flume/conf</value>
-    <description>Location to save configuration files</description>
-  </property>
-  <property>
-    <name>flume_log_dir</name>
-    <value>/var/log/flume</value>
-    <description>Location to save log files</description>
-  </property>
-  <property>
-    <name>flume_user</name>
-    <value>flume</value>
-    <property-type>USER</property-type>
-    <description>Flume User</description>
-  </property>
-
-  <!-- flume-env.sh -->
-  <property>
-    <name>content</name>
-    <description>This is the jinja template for flume-env.sh file</description>
-    <value>
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# If this file is placed at FLUME_CONF_DIR/flume-env.sh, it will be sourced
-# during Flume startup.
-
-# Environment variables can be set here.
-
-export JAVA_HOME={{java_home}}
-
-# Give Flume more memory and pre-allocate, enable remote monitoring via JMX
-# export JAVA_OPTS="-Xms100m -Xmx2000m -Dcom.sun.management.jmxremote"
-
-# Note that the Flume conf directory is always included in the classpath.
-#TODO temporary addition
-export FLUME_CLASSPATH=$FLUME_CLASSPATH:/usr/lib/flume/lib/*
-
-export HIVE_HOME={{flume_hive_home}}
-export HCAT_HOME={{flume_hcat_home}}
-    </value>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4cd4cb7/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/metainfo.xml
index cb05b02..809c6fc 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/metainfo.xml
@@ -20,49 +20,7 @@
   <services>
     <service>
       <name>FLUME</name>
-      <displayName>Flume</displayName>
-      <comment>A distributed service for collecting, aggregating, and moving large amounts of streaming data into HDFS</comment>
-      <version>1.4.0.2.0</version>
-      <components>
-        <component>
-          <name>FLUME_HANDLER</name>
-          <displayName>Flume</displayName>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/flume_handler.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>flume</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/flume_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>HDFS</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>flume-env</config-type>
-        <config-type>flume-conf</config-type>
-      </configuration-dependencies>
-
+      <extends>common-services/FLUME/1.4.0.2.0</extends>
     </service>
   </services>
 </metainfo>
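
This metainfo change is the heart of the refactor: instead of carrying the
full FLUME definition (components, packages, command scripts, required
services, configuration dependencies), the HDP-2.0.6 stack now declares
<extends>common-services/FLUME/1.4.0.2.0</extends> and inherits all of it from
the shared definition, which is also why the stack-level alerts.json and
configuration files are deleted above. Conceptually the merge behaves like a
child map overriding a parent map; a toy sketch of those semantics
(hypothetical dicts, not Ambari's real stack-manager code):

    # Toy illustration of stack-extends-common-services semantics.
    def resolve(parent, child):
        merged = dict(parent)
        merged.update(child)          # stack-level entries win where present
        return merged

    common_flume = {
        'version': '1.4.0.2.0',
        'components': ['FLUME_HANDLER'],
        'config_types': ['flume-env', 'flume-conf'],
    }
    hdp_2_0_6_flume = {}              # defines nothing, so inherits everything

    print(resolve(common_flume, hdp_2_0_6_flume))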